Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
perf vendor events intel: Refresh jaketown metrics and events
Update the jaketown metrics and events using the new tooling from:

  https://github.com/intel/perfmon

The metrics are unchanged but the formulas differ due to parentheses, use of exponents and removal of redundant operations like "* 1". The events are unchanged but unused json values are removed. The formatting changes increase consistency across the json files.

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Caleb Biggers <caleb.biggers@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Perry Taylor <perry.taylor@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20221215065510.1621979-10-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Committed by: Arnaldo Carvalho de Melo
Parent: 8ee37818a0
Commit: e85af8a641
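The formula changes described in the commit message can be seen side by side in the jkt-metrics.json hunks below. Three patterns recur; these before/after pairs are copied verbatim from that diff (the "..." elisions are ours):

  # Redundant parentheses dropped (tma_branch_mispredicts):
  #   before: (BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * tma_bad_speculation
  #   after:  BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation
  # Exponents instead of spelled-out powers of ten, and the TSC alias
  # instead of msr@tsc@ (Average_Frequency):
  #   before: Turbo_Utilization * msr@tsc@ / 1000000000 / duration_time
  #   after:  Turbo_Utilization * TSC / 1e9 / duration_time
  # Redundant "* 1" removed (FLOPc):
  #   before: (1 * (FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE) + ...) / CORE_CLKS
  #   after:  (FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + ...) / CORE_CLKS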
File diff suppressed because it is too large
@@ -1,8 +1,6 @@
[
{
"BriefDescription": "Cycles with any input/output SSE or FP assist.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
@@ -11,8 +9,6 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to input values.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_INPUT",
"SampleAfterValue": "100003",
@@ -20,8 +16,6 @@
},
{
"BriefDescription": "Number of SIMD FP assists due to Output values.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"SampleAfterValue": "100003",
@@ -29,8 +23,6 @@
},
{
"BriefDescription": "Number of X87 assists due to input value.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"SampleAfterValue": "100003",
@@ -38,8 +30,6 @@
},
{
"BriefDescription": "Number of X87 assists due to output value.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_OUTPUT",
"SampleAfterValue": "100003",
@@ -47,8 +37,6 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -56,8 +44,6 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -65,8 +51,6 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
@@ -74,8 +58,6 @@
},
{
"BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
"SampleAfterValue": "2000003",
@@ -83,8 +65,6 @@
},
{
"BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x10",
"EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000003",
@@ -92,8 +72,6 @@
},
{
"BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_STORE",
"SampleAfterValue": "100003",
@@ -101,8 +79,6 @@
},
{
"BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"SampleAfterValue": "100003",
@@ -110,8 +86,6 @@
},
{
"BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC1",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
@@ -119,8 +93,6 @@
},
{
"BriefDescription": "Number of AVX-256 Computational FP double precision uops issued this cycle.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x11",
"EventName": "SIMD_FP_256.PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -128,8 +100,6 @@
},
{
"BriefDescription": "Number of GSSE-256 Computational FP single precision uops issued this cycle.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x11",
"EventName": "SIMD_FP_256.PACKED_SINGLE",
"SampleAfterValue": "2000003",
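For reference, these FP_COMP_OPS_EXE and SIMD_FP_256 counters are the raw inputs of the FLOPc and GFLOPs metrics in jkt-metrics.json below. A minimal sketch of counting them directly, assuming a perf build that exposes these JSON event names in lowercase and a hypothetical ./workload:

  # Count single-precision FP uops at each vector width (weights 1/4/8 in FLOPc).
  perf stat -e fp_comp_ops_exe.sse_scalar_single,fp_comp_ops_exe.sse_packed_single,simd_fp_256.packed_single -- ./workload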
@@ -1,8 +1,6 @@
[
{
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xE6",
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "100003",
@@ -10,8 +8,6 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.COUNT",
"SampleAfterValue": "2000003",
@@ -19,8 +15,6 @@
},
{
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
@@ -29,8 +23,6 @@
},
{
"BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xAC",
"EventName": "DSB_FILL.ALL_CANCEL",
"SampleAfterValue": "2000003",
@@ -38,8 +30,6 @@
},
{
"BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xAC",
"EventName": "DSB_FILL.EXCEED_DSB_LINES",
"SampleAfterValue": "2000003",
@@ -47,8 +37,6 @@
},
{
"BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xAC",
"EventName": "DSB_FILL.OTHER_CANCEL",
"SampleAfterValue": "2000003",
@@ -56,8 +44,6 @@
},
{
"BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
"EventName": "ICACHE.HIT",
"SampleAfterValue": "2000003",
@@ -65,8 +51,6 @@
},
{
"BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
"EventName": "ICACHE.MISSES",
"PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
@@ -75,8 +59,6 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
@@ -85,8 +67,6 @@
},
{
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
@@ -95,8 +75,6 @@
},
{
"BriefDescription": "Cycles MITE is delivering 4 Uops.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "4",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
@@ -105,8 +83,6 @@
},
{
"BriefDescription": "Cycles MITE is delivering any Uop.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -115,8 +91,6 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.DSB_CYCLES",
@@ -125,8 +99,6 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x79",
"EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
@@ -134,8 +106,6 @@
},
{
"BriefDescription": "Instruction Decode Queue (IDQ) empty cycles.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0x79",
"EventName": "IDQ.EMPTY",
"SampleAfterValue": "2000003",
@@ -143,8 +113,6 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x79",
"EventName": "IDQ.MITE_ALL_UOPS",
"SampleAfterValue": "2000003",
@@ -152,8 +120,6 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MITE_CYCLES",
@@ -162,8 +128,6 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x79",
"EventName": "IDQ.MITE_UOPS",
"SampleAfterValue": "2000003",
@@ -171,8 +135,6 @@
},
{
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_CYCLES",
@@ -182,8 +144,6 @@
},
{
"BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_CYCLES",
@@ -192,8 +152,6 @@
},
{
"BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -203,8 +161,6 @@
},
{
"BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x79",
"EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
@@ -212,8 +168,6 @@
},
{
"BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x79",
"EventName": "IDQ.MS_MITE_UOPS",
"SampleAfterValue": "2000003",
@@ -221,8 +175,6 @@
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -232,8 +184,6 @@
},
{
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x79",
"EventName": "IDQ.MS_UOPS",
"SampleAfterValue": "2000003",
@@ -241,8 +191,6 @@
},
{
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
@@ -251,8 +199,6 @@
},
{
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
@@ -261,8 +207,6 @@
},
{
"BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
@@ -272,8 +216,6 @@
},
{
"BriefDescription": "Cycles when 1 or more uops were delivered to the by the front end.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"CounterMask": "4",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_GE_1_UOP_DELIV.CORE",
@@ -283,8 +225,6 @@
},
{
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"CounterMask": "3",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
@@ -293,8 +233,6 @@
},
{
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"CounterMask": "2",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
@@ -303,8 +241,6 @@
},
{
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"CounterMask": "1",
"EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
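The IDQ.*_UOPS events above are the inputs of the DSB_Coverage metric defined in jkt-metrics.json below. A sketch of collecting its four terms directly, assuming lsd.uops is also exposed on this CPU and ./workload is a stand-in:

  # DSB_Coverage = idq.dsb_uops / (idq.dsb_uops + lsd.uops + idq.mite_uops + idq.ms_uops)
  perf stat -e idq.dsb_uops,idq.mite_uops,idq.ms_uops,lsd.uops -- ./workload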
@@ -65,7 +65,7 @@
},
{
"BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
"MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * ((INT_MISC.RECOVERY_CYCLES_ANY / 2) if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / SLOTS",
"MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / SLOTS",
"MetricGroup": "TopdownL1;tma_L1_group",
"MetricName": "tma_bad_speculation",
"PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
@@ -73,7 +73,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
"MetricExpr": "(BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * tma_bad_speculation",
"MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
"MetricGroup": "BadSpec;BrMispredicts;TopdownL2;tma_L2_group;tma_bad_speculation_group",
"MetricName": "tma_branch_mispredicts",
"PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
@@ -97,7 +97,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
"MetricExpr": "((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_L1D_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_DISPATCH) + cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=1@ - cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=3@ if (IPC > 1.8) else cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=2@ - RS_EVENTS.EMPTY_CYCLES if (tma_fetch_latency > 0.1) else RESOURCE_STALLS.SB)) * tma_backend_bound",
"MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_L1D_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_DISPATCH) + cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=1@ - cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=3@ if IPC > 1.8 else (cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=2@ - RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else RESOURCE_STALLS.SB)) * tma_backend_bound",
"MetricGroup": "Backend;TopdownL2;tma_L2_group;tma_backend_bound_group",
"MetricName": "tma_memory_bound",
"PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
@@ -113,7 +113,7 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
"MetricExpr": "(MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS)) * CYCLE_ACTIVITY.STALLS_L2_PENDING / CLKS",
"MetricExpr": "MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / CLKS",
"MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_memory_bound_group",
"MetricName": "tma_l3_bound",
"PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
@@ -121,7 +121,7 @@
},
{
"BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
"MetricExpr": "(1 - (MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS))) * CYCLE_ACTIVITY.STALLS_L2_PENDING / CLKS",
"MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS)) * CYCLE_ACTIVITY.STALLS_L2_PENDING / CLKS",
"MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_memory_bound_group",
"MetricName": "tma_dram_bound",
"PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
@@ -169,7 +169,7 @@
},
{
"BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
"MetricExpr": "((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_DISPATCH) + cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=1@ - cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=3@ if (IPC > 1.8) else cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=2@ - RS_EVENTS.EMPTY_CYCLES if (tma_fetch_latency > 0.1) else RESOURCE_STALLS.SB) - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_L1D_PENDING)) / CLKS",
"MetricExpr": "((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_DISPATCH) + cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=1@ - cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=3@ if IPC > 1.8 else (cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=2@ - RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else RESOURCE_STALLS.SB)) - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_L1D_PENDING)) / CLKS",
"MetricGroup": "PortsUtil;TopdownL3;tma_core_bound_group",
"MetricName": "tma_ports_utilization",
"PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
@@ -233,7 +233,7 @@
},
{
"BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
"MetricExpr": "(UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY) * IDQ.MS_UOPS / SLOTS",
"MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / SLOTS",
"MetricGroup": "MicroSeq;TopdownL3;tma_heavy_operations_group",
"MetricName": "tma_microcode_sequencer",
"PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS",
@@ -284,19 +284,19 @@
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
"MetricExpr": "(1 * (FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / CORE_CLKS",
"MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / CORE_CLKS",
"MetricGroup": "Flops;Ret",
"MetricName": "FLOPc"
},
{
"BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
"MetricExpr": "UOPS_DISPATCHED.THREAD / ((cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricExpr": "UOPS_DISPATCHED.THREAD / (cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
"MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
"MetricName": "ILP"
},
{
"BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
"MetricExpr": "((CPU_CLK_UNHALTED.THREAD / 2) * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK)) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2) if #SMT_on else CLKS",
"MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else CLKS))",
"MetricGroup": "SMT",
"MetricName": "CORE_CLKS"
},
@@ -314,25 +314,25 @@
},
{
"BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
"MetricExpr": "IDQ.DSB_UOPS / ((IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS))",
"MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
"MetricGroup": "DSB;Fed;FetchBW",
"MetricName": "DSB_Coverage"
},
{
"BriefDescription": "Average CPU Utilization",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
"MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
"MetricGroup": "HPC;Summary",
"MetricName": "CPU_Utilization"
},
{
"BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
"MetricExpr": "Turbo_Utilization * msr@tsc@ / 1000000000 / duration_time",
"MetricExpr": "Turbo_Utilization * TSC / 1e9 / duration_time",
"MetricGroup": "Power;Summary",
"MetricName": "Average_Frequency"
},
{
"BriefDescription": "Giga Floating Point Operations Per Second",
"MetricExpr": "((1 * (FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1000000000) / duration_time",
"MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time",
"MetricGroup": "Cor;Flops;HPC",
"MetricName": "GFLOPs",
"PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
@@ -345,7 +345,7 @@
},
{
"BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
"MetricExpr": "1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0",
"MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
"MetricGroup": "SMT",
"MetricName": "SMT_2T_Utilization"
},
@@ -363,10 +363,22 @@
},
{
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "(64 * (uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@) / 1000000000) / duration_time",
"MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
"MetricGroup": "HPC;Mem;MemoryBW;SoC",
"MetricName": "DRAM_BW_Use"
},
{
"BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
"MetricExpr": "1e9 * (UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_INSERTS.MISS_OPCODE@filter_opc\\=0x182@) / (Socket_CLKS / duration_time)",
"MetricGroup": "Mem;MemoryLat;SoC",
"MetricName": "MEM_Read_Latency"
},
{
"BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
"MetricExpr": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182\\,thresh\\=1@",
"MetricGroup": "Mem;MemoryBW;SoC",
"MetricName": "MEM_Parallel_Reads"
},
{
"BriefDescription": "Socket actual clocks when any core is active on that socket",
"MetricExpr": "cbox_0@event\\=0x0@",
@@ -379,52 +391,59 @@
"MetricGroup": "Branches;OS",
"MetricName": "IpFarBranch"
},
{
"BriefDescription": "Uncore frequency per die [GHZ]",
"MetricExpr": "Socket_CLKS / #num_dies / duration_time / 1e9",
"MetricGroup": "SoC",
"MetricName": "UNCORE_FREQ"
},
{
"BriefDescription": "C3 residency percent per core",
"MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
"MetricExpr": "cstate_core@c3\\-residency@ / TSC",
"MetricGroup": "Power",
"MetricName": "C3_Core_Residency"
"MetricName": "C3_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per core",
"MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
"MetricExpr": "cstate_core@c6\\-residency@ / TSC",
"MetricGroup": "Power",
"MetricName": "C6_Core_Residency"
"MetricName": "C6_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per core",
"MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
"MetricExpr": "cstate_core@c7\\-residency@ / TSC",
"MetricGroup": "Power",
"MetricName": "C7_Core_Residency"
"MetricName": "C7_Core_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C2 residency percent per package",
"MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
"MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
"MetricGroup": "Power",
"MetricName": "C2_Pkg_Residency"
"MetricName": "C2_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C3 residency percent per package",
"MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
"MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
"MetricGroup": "Power",
"MetricName": "C3_Pkg_Residency"
"MetricName": "C3_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C6 residency percent per package",
"MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
"MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
"MetricGroup": "Power",
"MetricName": "C6_Pkg_Residency"
"MetricName": "C6_Pkg_Residency",
"ScaleUnit": "100%"
},
{
"BriefDescription": "C7 residency percent per package",
"MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
"MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
"MetricGroup": "Power",
"MetricName": "C7_Pkg_Residency"
},
{
"BriefDescription": "Uncore frequency per die [GHZ]",
"MetricExpr": "Socket_CLKS / #num_dies / duration_time / 1000000000",
"MetricGroup": "SoC",
"MetricName": "UNCORE_FREQ"
"MetricName": "C7_Pkg_Residency",
"ScaleUnit": "100%"
}
]
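Since only the formatting of the formulas changed, the metrics should evaluate as before. A hypothetical spot-check on a Jaketown (Sandy Bridge-EP) system, using names from the MetricName fields above (perf stat -M accepts metric and metric-group names):

  # Top-level top-down breakdown, system-wide for one second.
  perf stat -M TopdownL1 -a -- sleep 1
  # Individual metrics against a stand-in workload.
  perf stat -M DSB_Coverage,FLOPc -- ./workload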
@@ -1,8 +1,6 @@
[
{
"BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC3",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
@@ -11,124 +9,94 @@
},
{
"BriefDescription": "Loads with latency value being above 128.",
"Counter": "3",
"CounterHTOff": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
"MSRValue": "0x80",
"PEBS": "2",
"SampleAfterValue": "1009",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Loads with latency value being above 16.",
"Counter": "3",
"CounterHTOff": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
"MSRValue": "0x10",
"PEBS": "2",
"SampleAfterValue": "20011",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Loads with latency value being above 256.",
"Counter": "3",
"CounterHTOff": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
"MSRValue": "0x100",
"PEBS": "2",
"SampleAfterValue": "503",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Loads with latency value being above 32.",
"Counter": "3",
"CounterHTOff": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
"MSRValue": "0x20",
"PEBS": "2",
"SampleAfterValue": "100007",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Loads with latency value being above 4 .",
"Counter": "3",
"CounterHTOff": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
"MSRValue": "0x4",
"PEBS": "2",
"SampleAfterValue": "100003",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Loads with latency value being above 512.",
"Counter": "3",
"CounterHTOff": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
"MSRValue": "0x200",
"PEBS": "2",
"SampleAfterValue": "101",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Loads with latency value being above 64.",
"Counter": "3",
"CounterHTOff": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
"MSRValue": "0x40",
"PEBS": "2",
"SampleAfterValue": "2003",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Loads with latency value being above 8.",
"Counter": "3",
"CounterHTOff": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
"MSRValue": "0x8",
"PEBS": "2",
"SampleAfterValue": "50021",
"TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only. (Precise Event - PEBS).",
"Counter": "3",
"CounterHTOff": "3",
"EventCode": "0xCD",
"EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
"PEBS": "2",
"PRECISE_STORE": "1",
"SampleAfterValue": "2000003",
"TakenAlone": "1",
"UMask": "0x2"
},
{
"BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.LOADS",
"SampleAfterValue": "2000003",
@@ -136,8 +104,6 @@
},
{
"BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x05",
"EventName": "MISALIGN_MEM_REF.STORES",
"SampleAfterValue": "2000003",
@@ -145,277 +111,208 @@
},
{
"BriefDescription": "This event counts all LLC misses for all demand and L2 prefetches. LLC prefetches are excluded.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3FFFC20077",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all local dram accesses for all demand and L2 prefetches. LLC prefetches are excluded.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x600400077",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "This event counts all remote cache-to-cache transfers (includes HITM and HIT-Forward) for all demand and L2 prefetches. LLC prefetches are excluded.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.REMOTE_HITM_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x187FC20077",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3fffc20004",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x600400004",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from remote dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x67f800004",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x107fc00004",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all demand code reads that miss the LLC and the data forwarded from remote cache",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x87f820004",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote & local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x67fc00001",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts demand data reads that miss in the LLC",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3fffc20001",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data returned from local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x600400001",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x67f800001",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts demand data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x107fc00001",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts demand data reads that miss the LLC and the data forwarded from remote cache",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x87f820001",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from remote & local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3fffc20040",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote & local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x67fc00010",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the LLC",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3fffc20010",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from local dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x600400010",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote dram",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x67f800010",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x107fc00010",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data forwarded from remote cache",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x87f820010",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the LLC",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3fffc20200",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3fffc20080",
"Offcore": "1",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
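Two usage sketches for the event classes above, with ./workload as a stand-in. The MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* events program their latency threshold through MSR 0x3F6, which perf exposes as the ldlat event attribute; the OFFCORE_RESPONSE events take one of the MSRValue masks above through the offcore_rsp attribute:

  # Sample loads that stalled for more than 128 cycles (event 0xCD, umask 0x1).
  perf record -e cpu/event=0xcd,umask=0x1,ldlat=128/pp -- ./workload
  # Count demand data reads that miss the LLC (MSRValue 0x3fffc20001 above).
  perf stat -a -e cpu/event=0xb7,umask=0x1,offcore_rsp=0x3fffc20001/ -- sleep 1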
@@ -1,8 +1,6 @@
[
{
"BriefDescription": "Unhalted core cycles when the thread is in ring 0.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"SampleAfterValue": "2000003",
@@ -10,8 +8,6 @@
},
{
"BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
@@ -21,8 +17,6 @@
},
{
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"SampleAfterValue": "2000003",
@@ -30,8 +24,6 @@
},
{
"BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x4E",
"EventName": "HW_PRE_REQ.DL1_MISS",
"SampleAfterValue": "2000003",
@@ -39,8 +31,6 @@
},
{
"BriefDescription": "Valid instructions written to IQ per cycle.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x17",
"EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
"SampleAfterValue": "2000003",
@@ -48,8 +38,6 @@
},
{
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"SampleAfterValue": "2000003",
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,7 +1,6 @@
|
||||
[
|
||||
{
|
||||
"BriefDescription": "Number of qfclks",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x14",
|
||||
"EventName": "UNC_Q_CLOCKTICKS",
|
||||
"PerPkg": "1",
|
||||
@@ -10,17 +9,14 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Count of CTO Events",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x38",
|
||||
"EventName": "UNC_Q_CTO_COUNT",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. You can use edge detect to count the number of cases when both events triggered.",
|
||||
"Unit": "QPI LL"
|
||||
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
"PerPkg": "1",
@@ -30,7 +26,6 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
"PerPkg": "1",
@@ -40,7 +35,6 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Not Set",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT",
"PerPkg": "1",
@@ -50,7 +44,6 @@
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Success",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.SUCCESS",
"PerPkg": "1",
@@ -60,7 +53,6 @@
},
{
"BriefDescription": "Cycles in L1",
"Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_L1_POWER_CYCLES",
"PerPkg": "1",
@@ -69,7 +61,6 @@
},
{
"BriefDescription": "Cycles in L0p",
"Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL0P_POWER_CYCLES",
"PerPkg": "1",
@@ -78,7 +69,6 @@
},
{
"BriefDescription": "Cycles in L0",
"Counter": "0,1,2,3",
"EventCode": "0xf",
"EventName": "UNC_Q_RxL0_POWER_CYCLES",
"PerPkg": "1",
@@ -87,7 +77,6 @@
},
{
"BriefDescription": "Rx Flit Buffer Bypassed",
"Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_BYPASSED",
"PerPkg": "1",
@@ -96,7 +85,6 @@
},
{
"BriefDescription": "CRC Errors Detected; LinkInit",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
"PerPkg": "1",
@@ -106,7 +94,6 @@
},
{
"BriefDescription": "CRC Errors Detected; Normal Operations",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
"PerPkg": "1",
@@ -116,10 +103,8 @@
},
{
"BriefDescription": "VN0 Credit Consumed; DRS",
"Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
"UMask": "0x1",
@@ -127,10 +112,8 @@
},
{
"BriefDescription": "VN0 Credit Consumed; HOM",
"Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
"UMask": "0x8",
@@ -138,10 +121,8 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NCB",
"Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
"UMask": "0x2",
@@ -149,10 +130,8 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NCS",
"Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
"UMask": "0x4",
@@ -160,10 +139,8 @@
},
{
"BriefDescription": "VN0 Credit Consumed; NDR",
"Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
"UMask": "0x20",
@@ -171,10 +148,8 @@
},
{
"BriefDescription": "VN0 Credit Consumed; SNP",
"Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
"UMask": "0x10",
@@ -182,17 +157,14 @@
},
{
"BriefDescription": "VNA Credit Consumed",
"Counter": "0,1,2,3",
"EventCode": "0x1d",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypassed.",
"Unit": "QPI LL"
},
{
"BriefDescription": "RxQ Cycles Not Empty",
"Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_Q_RxL_CYCLES_NE",
"PerPkg": "1",
@@ -201,7 +173,6 @@
},
{
"BriefDescription": "Flits Received - Group 0; Data Tx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_RxL_FLITS_G0.DATA",
"PerPkg": "1",
@@ -211,7 +182,6 @@
},
{
"BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
"PerPkg": "1",
@@ -221,7 +191,6 @@
},
{
"BriefDescription": "Flits Received - Group 0; Non-Data protocol Tx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_RxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
@@ -231,10 +200,8 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x18",
@@ -242,10 +209,8 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Data Flits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x8",
@@ -253,10 +218,8 @@
},
{
"BriefDescription": "Flits Received - Group 1; DRS Header Flits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x10",
@@ -264,10 +227,8 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Flits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x6",
@@ -275,10 +236,8 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x4",
@@ -286,10 +245,8 @@
},
{
"BriefDescription": "Flits Received - Group 1; HOM Request Flits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x2",
@@ -297,10 +254,8 @@
},
{
"BriefDescription": "Flits Received - Group 1; SNP Flits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.SNP",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x1",
@@ -308,10 +263,8 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0xc",
@@ -319,10 +272,8 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x4",
@@ -330,10 +281,8 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x8",
@@ -341,10 +290,8 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCS",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x10",
@@ -352,10 +299,8 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x1",
@@ -363,10 +308,8 @@
},
{
"BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
"UMask": "0x2",
@@ -374,7 +317,6 @@
},
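The Group 1 and Group 2 descriptions above give the link arithmetic in prose: 80 bits per flit, and a 64B cacheline travelling as 9 flits of which 8 carry 8B of data each. A small Python sketch of the two quoted formulas; the counter values are hypothetical, and the 4B-per-data-flit figure for L0p is taken from the Tx Group 0 description later in this diff:

def qpi_bandwidth(flits, data_flits, seconds, half_width=False):
    # Link bandwidth per the descriptions: flits * 80b / time.
    link_bytes_per_sec = flits * 80 / 8 / seconds
    # Data bandwidth: data flits * 8B / time in L0, or 4B in L0p.
    data_bytes_per_sec = data_flits * (4 if half_width else 8) / seconds
    return link_bytes_per_sec, data_bytes_per_sec

# Hypothetical counts over a one-second window.
link_bw, data_bw = qpi_bandwidth(flits=1_000_000, data_flits=800_000, seconds=1.0)
print(f"link {link_bw:.0f} B/s, data {data_bw:.0f} B/s")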
{
"BriefDescription": "Rx Flit Buffer Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_Q_RxL_INSERTS",
"PerPkg": "1",
@@ -383,67 +325,54 @@
},
{
"BriefDescription": "Rx Flit Buffer Allocations - DRS",
"Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_INSERTS_DRS",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
"Unit": "QPI LL"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - HOM",
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_Q_RxL_INSERTS_HOM",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
"Unit": "QPI LL"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCB",
"Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_Q_RxL_INSERTS_NCB",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
"Unit": "QPI LL"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCS",
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_Q_RxL_INSERTS_NCS",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
"Unit": "QPI LL"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NDR",
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_Q_RxL_INSERTS_NDR",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
"Unit": "QPI LL"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - SNP",
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_Q_RxL_INSERTS_SNP",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
"Unit": "QPI LL"
},
{
"BriefDescription": "RxQ Occupancy - All Packets",
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_Q_RxL_OCCUPANCY",
"PerPkg": "1",
@@ -452,67 +381,54 @@
},
{
"BriefDescription": "RxQ Occupancy - DRS",
"Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_Q_RxL_OCCUPANCY_DRS",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
"Unit": "QPI LL"
},
{
"BriefDescription": "RxQ Occupancy - HOM",
"Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_Q_RxL_OCCUPANCY_HOM",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
"Unit": "QPI LL"
},
{
"BriefDescription": "RxQ Occupancy - NCB",
"Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCB",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
"Unit": "QPI LL"
},
{
"BriefDescription": "RxQ Occupancy - NCS",
"Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCS",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
"Unit": "QPI LL"
},
{
"BriefDescription": "RxQ Occupancy - NDR",
"Counter": "0,1,2,3",
"EventCode": "0x1a",
"EventName": "UNC_Q_RxL_OCCUPANCY_NDR",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
"Unit": "QPI LL"
},
{
"BriefDescription": "RxQ Occupancy - SNP",
"Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_Q_RxL_OCCUPANCY_SNP",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
"Unit": "QPI LL"
},
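The occupancy descriptions above spell out how these counters compose: accumulated occupancy divided by not-empty cycles gives the average queue depth while busy, and occupancy divided by allocations gives the average flit buffer lifetime. A short sketch with hypothetical counter readings:

# Hypothetical readings taken over the same sampling window.
occupancy_sum = 500_000     # UNC_Q_RxL_OCCUPANCY: depth accumulated per cycle
cycles_not_empty = 125_000  # UNC_Q_RxL_CYCLES_NE
allocations = 50_000        # UNC_Q_RxL_INSERTS

avg_depth_while_busy = occupancy_sum / cycles_not_empty  # average occupancy
avg_lifetime_cycles = occupancy_sum / allocations        # average residency
print(avg_depth_while_busy, avg_lifetime_cycles)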
|
||||
{
|
||||
"BriefDescription": "Stalls Sending to R3QPI; BGF Stall - HOM",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x35",
|
||||
"EventName": "UNC_Q_RxL_STALLS.BGF_DRS",
|
||||
"PerPkg": "1",
|
||||
@@ -522,7 +438,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Stalls Sending to R3QPI; BGF Stall - DRS",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x35",
|
||||
"EventName": "UNC_Q_RxL_STALLS.BGF_HOM",
|
||||
"PerPkg": "1",
|
||||
@@ -532,7 +447,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Stalls Sending to R3QPI; BGF Stall - SNP",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x35",
|
||||
"EventName": "UNC_Q_RxL_STALLS.BGF_NCB",
|
||||
"PerPkg": "1",
|
||||
@@ -542,7 +456,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NDR",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x35",
|
||||
"EventName": "UNC_Q_RxL_STALLS.BGF_NCS",
|
||||
"PerPkg": "1",
|
||||
@@ -552,7 +465,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NCS",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x35",
|
||||
"EventName": "UNC_Q_RxL_STALLS.BGF_NDR",
|
||||
"PerPkg": "1",
|
||||
@@ -562,7 +474,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NCB",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x35",
|
||||
"EventName": "UNC_Q_RxL_STALLS.BGF_SNP",
|
||||
"PerPkg": "1",
|
||||
@@ -572,7 +483,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Stalls Sending to R3QPI; Egress Credits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x35",
|
||||
"EventName": "UNC_Q_RxL_STALLS.EGRESS_CREDITS",
|
||||
"PerPkg": "1",
|
||||
@@ -582,7 +492,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Stalls Sending to R3QPI; GV",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x35",
|
||||
"EventName": "UNC_Q_RxL_STALLS.GV",
|
||||
"PerPkg": "1",
|
||||
@@ -592,7 +501,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Cycles in L0p",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0xd",
|
||||
"EventName": "UNC_Q_TxL0P_POWER_CYCLES",
|
||||
"PerPkg": "1",
|
||||
@@ -601,7 +509,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Cycles in L0",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0xc",
|
||||
"EventName": "UNC_Q_TxL0_POWER_CYCLES",
|
||||
"PerPkg": "1",
|
||||
@@ -610,7 +517,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Tx Flit Buffer Bypassed",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x5",
|
||||
"EventName": "UNC_Q_TxL_BYPASSED",
|
||||
"PerPkg": "1",
|
||||
@@ -619,7 +525,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x2",
|
||||
"EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
|
||||
"PerPkg": "1",
|
||||
@@ -629,7 +534,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x2",
|
||||
"EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
|
||||
"PerPkg": "1",
|
||||
@@ -639,7 +543,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Tx Flit Buffer Cycles not Empty",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x6",
|
||||
"EventName": "UNC_Q_TxL_CYCLES_NE",
|
||||
"PerPkg": "1",
|
||||
@@ -648,7 +551,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G0.DATA",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
|
||||
@@ -657,7 +559,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 0; Idle and Null Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G0.IDLE",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
|
||||
@@ -666,7 +567,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
|
||||
@@ -675,9 +575,7 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G1.DRS",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0x18",
|
||||
@@ -685,9 +583,7 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0x8",
|
||||
@@ -695,9 +591,7 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0x10",
|
||||
@@ -705,9 +599,7 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 1; HOM Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G1.HOM",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0x6",
|
||||
@@ -715,9 +607,7 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0x4",
|
||||
@@ -725,9 +615,7 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0x2",
|
||||
@@ -735,9 +623,7 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 1; SNP Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G1.SNP",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0x1",
|
||||
@@ -745,10 +631,8 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x1",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G2.NCB",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0xc",
|
||||
@@ -756,10 +640,8 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x1",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0x4",
|
||||
@@ -767,10 +649,8 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x1",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0x8",
|
||||
@@ -778,10 +658,8 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x1",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G2.NCS",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0x10",
|
||||
@@ -789,10 +667,8 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x1",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0x1",
|
||||
@@ -800,10 +676,8 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x1",
|
||||
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Counts the number of flits trasmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transfering a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
|
||||
"UMask": "0x2",
|
||||
@@ -811,7 +685,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Tx Flit Buffer Allocations",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x4",
|
||||
"EventName": "UNC_Q_TxL_INSERTS",
|
||||
"PerPkg": "1",
|
||||
@@ -820,7 +693,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "Tx Flit Buffer Occupancy",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x7",
|
||||
"EventName": "UNC_Q_TxL_OCCUPANCY",
|
||||
"PerPkg": "1",
|
||||
@@ -829,20 +701,16 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "VNA Credits Returned",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x1c",
|
||||
"EventName": "UNC_Q_VNA_CREDIT_RETURNS",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Number of VNA credits returned.",
|
||||
"Unit": "QPI LL"
|
||||
},
|
||||
{
|
||||
"BriefDescription": "VNA Credits Pending Return - Occupancy",
|
||||
"Counter": "0,1,2,3",
|
||||
"EventCode": "0x1b",
|
||||
"EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
|
||||
"ExtSel": "1",
|
||||
"PerPkg": "1",
|
||||
"PublicDescription": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
|
||||
"Unit": "QPI LL"
|
||||
|
||||
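The flit descriptions above imply two different bandwidth formulas, and the distinction is easy to miss. Here is a minimal Python sketch of both calculations; the counter readings are purely hypothetical (the event names in the comments come from this file, the numbers do not come from any real measurement):

# QPI link bandwidth vs. data bandwidth from flit counts (hypothetical values).
# Each flit carries 80 bits on the wire; a 64B cacheline crosses the link
# as 9 flits, 8 of which carry 8 bytes of payload each.
total_flits = 9_000_000    # hypothetical: all Tx flits over the interval
data_flits = 8_000_000     # hypothetical: payload-carrying flits only
seconds = 0.1              # measurement interval

link_bw_bytes = total_flits * 80 / 8 / seconds   # flits * 80b / time
data_bw_bytes = data_flits * 8 / seconds         # data flits * 8B / time
print(f"link: {link_bw_bytes / 1e6:.0f} MB/s, data: {data_bw_bytes / 1e6:.0f} MB/s")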
@@ -1,7 +1,6 @@
[
{
"BriefDescription": "DRAM Activate Count",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT",
"PerPkg": "1",
@@ -10,77 +9,62 @@
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xf",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_REG",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0xc",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_RMM",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x8",
"Unit": "iMC"
},
{
"BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_M_CAS_COUNT.WR_WMM",
"PerPkg": "1",
"PublicDescription": "DRAM RD_CAS and WR_CAS Commands",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "uclks",
"Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Uncore Fixed Counter - uclks",
@@ -88,7 +72,6 @@
},
{
"BriefDescription": "DRAM Precharge All Commands",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_M_DRAM_PRE_ALL",
"PerPkg": "1",
@@ -97,7 +80,6 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.HIGH",
"PerPkg": "1",
@@ -107,7 +89,6 @@
},
{
"BriefDescription": "Number of DRAM Refreshes Issued",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_M_DRAM_REFRESH.PANIC",
"PerPkg": "1",
@@ -117,7 +98,6 @@
},
{
"BriefDescription": "ECC Correctable Errors",
"Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
"PerPkg": "1",
@@ -126,7 +106,6 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.ISOCH",
"PerPkg": "1",
@@ -136,7 +115,6 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.PARTIAL",
"PerPkg": "1",
@@ -146,7 +124,6 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Read Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.READ",
"PerPkg": "1",
@@ -156,7 +133,6 @@
},
{
"BriefDescription": "Cycles in a Major Mode; Write Major Mode",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_M_MAJOR_MODES.WRITE",
"PerPkg": "1",
@@ -166,7 +142,6 @@
},
{
"BriefDescription": "Channel DLLOFF Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x84",
"EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
"PerPkg": "1",
@@ -175,7 +150,6 @@
},
{
"BriefDescription": "Channel PPD Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
"PerPkg": "1",
@@ -184,7 +158,6 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
"PerPkg": "1",
@@ -194,7 +167,6 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
"PerPkg": "1",
@@ -204,7 +176,6 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
"PerPkg": "1",
@@ -214,7 +185,6 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
"PerPkg": "1",
@@ -224,7 +194,6 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
"PerPkg": "1",
@@ -234,7 +203,6 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
"PerPkg": "1",
@@ -244,7 +212,6 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
"PerPkg": "1",
@@ -254,7 +221,6 @@
},
{
"BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x83",
"EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
"PerPkg": "1",
@@ -264,7 +230,6 @@
},
{
"BriefDescription": "Critical Throttle Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
"PerPkg": "1",
@@ -273,7 +238,6 @@
},
{
"BriefDescription": "Clock-Enabled Self-Refresh",
"Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
"PerPkg": "1",
@@ -282,7 +246,6 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
"PerPkg": "1",
@@ -292,7 +255,6 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
"PerPkg": "1",
@@ -302,7 +264,6 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
"PerPkg": "1",
@@ -312,7 +273,6 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
"PerPkg": "1",
@@ -322,7 +282,6 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
"PerPkg": "1",
@@ -332,7 +291,6 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
"PerPkg": "1",
@@ -342,7 +300,6 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
"PerPkg": "1",
@@ -352,7 +309,6 @@
},
{
"BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
"Counter": "0,1,2,3",
"EventCode": "0x41",
"EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
"PerPkg": "1",
@@ -362,7 +318,6 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Read Preemption",
"Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
"PerPkg": "1",
@@ -372,7 +327,6 @@
},
{
"BriefDescription": "Read Preemption Count; Read over Write Preemption",
"Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
"PerPkg": "1",
@@ -382,7 +336,6 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
"PerPkg": "1",
@@ -392,7 +345,6 @@
},
{
"BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
@@ -402,7 +354,6 @@
},
{
"BriefDescription": "Read Pending Queue Full Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_M_RPQ_CYCLES_FULL",
"PerPkg": "1",
@@ -411,7 +362,6 @@
},
{
"BriefDescription": "Read Pending Queue Not Empty",
"Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_M_RPQ_CYCLES_NE",
"PerPkg": "1",
@@ -420,7 +370,6 @@
},
{
"BriefDescription": "Read Pending Queue Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS",
"PerPkg": "1",
@@ -429,7 +378,6 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY",
"PerPkg": "1",
@@ -438,7 +386,6 @@
},
{
"BriefDescription": "Write Pending Queue Full Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_M_WPQ_CYCLES_FULL",
"PerPkg": "1",
@@ -447,7 +394,6 @@
},
{
"BriefDescription": "Write Pending Queue Not Empty",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_M_WPQ_CYCLES_NE",
"PerPkg": "1",
@@ -456,7 +402,6 @@
},
{
"BriefDescription": "Write Pending Queue Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS",
"PerPkg": "1",
@@ -465,7 +410,6 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_WPQ_OCCUPANCY",
"PerPkg": "1",
@@ -474,7 +418,6 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
"Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_M_WPQ_READ_HIT",
"PerPkg": "1",
@@ -483,7 +426,6 @@
},
{
"BriefDescription": "Write Pending Queue CAM Match",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_M_WPQ_WRITE_HIT",
"PerPkg": "1",
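Since each RD_CAS or WR_CAS counted by the UNC_M_CAS_COUNT group above generally corresponds to one 64-byte cacheline transfer, channel bandwidth falls out directly from the counts. A minimal Python sketch, with purely hypothetical counter readings:

# DRAM channel bandwidth from iMC CAS counts (hypothetical values).
rd_cas = 50_000_000    # UNC_M_CAS_COUNT.RD over the interval
wr_cas = 20_000_000    # UNC_M_CAS_COUNT.WR over the interval
seconds = 1.0          # measurement interval

read_bw = rd_cas * 64 / seconds     # each CAS moves a 64B cacheline
write_bw = wr_cas * 64 / seconds
print(f"read: {read_bw / 1e9:.2f} GB/s, write: {write_bw / 1e9:.2f} GB/s")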
File diff suppressed because it is too large
@@ -1,7 +1,6 @@
[
{
"BriefDescription": "pclk Cycles",
"Counter": "0,1,2,3",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
@@ -9,87 +8,70 @@
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x1e",
"EventName": "UNC_P_DEMOTIONS_CORE0",
"PerPkg": "1",
@@ -98,7 +80,6 @@
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x1f",
"EventName": "UNC_P_DEMOTIONS_CORE1",
"PerPkg": "1",
@@ -107,7 +88,6 @@
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_P_DEMOTIONS_CORE2",
"PerPkg": "1",
@@ -116,7 +96,6 @@
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_P_DEMOTIONS_CORE3",
"PerPkg": "1",
@@ -125,7 +104,6 @@
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_P_DEMOTIONS_CORE4",
"PerPkg": "1",
@@ -134,7 +112,6 @@
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_P_DEMOTIONS_CORE5",
"PerPkg": "1",
@@ -143,7 +120,6 @@
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_P_DEMOTIONS_CORE6",
"PerPkg": "1",
@@ -152,7 +128,6 @@
},
{
"BriefDescription": "Core C State Demotions",
"Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_P_DEMOTIONS_CORE7",
"PerPkg": "1",
@@ -161,7 +136,6 @@
},
{
"BriefDescription": "Frequency Residency",
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_BAND0_CYCLES",
"PerPkg": "1",
@@ -170,7 +144,6 @@
},
{
"BriefDescription": "Frequency Residency",
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_BAND1_CYCLES",
"PerPkg": "1",
@@ -179,7 +152,6 @@
},
{
"BriefDescription": "Frequency Residency",
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_BAND2_CYCLES",
"PerPkg": "1",
@@ -188,7 +160,6 @@
},
{
"BriefDescription": "Frequency Residency",
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_BAND3_CYCLES",
"PerPkg": "1",
@@ -197,7 +168,6 @@
},
{
"BriefDescription": "Current Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
"PerPkg": "1",
@@ -206,7 +176,6 @@
},
{
"BriefDescription": "Thermal Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
"PerPkg": "1",
@@ -215,7 +184,6 @@
},
{
"BriefDescription": "OS Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
"PerPkg": "1",
@@ -224,7 +192,6 @@
},
{
"BriefDescription": "Power Strongest Upper Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
"PerPkg": "1",
@@ -233,36 +200,29 @@
},
{
"BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs of the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
"Unit": "PCU"
},
{
"BriefDescription": "Perf P Limit Strongest Lower Limit Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_P_FREQ_MIN_PERF_P_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when Perf P Limit is preventing us from dropping the frequency lower. Perf P Limit is an algorithm that takes input from remote sockets when determining if a socket should drop its frequency down. This is largely to minimize increases in snoop and remote read latencies.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles spent changing Frequency",
"Counter": "0,1,2,3",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the system is changing frequency. This cannot be filtered by thread ID. One can also use it with the occupancy counter that monitors the number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
"BriefDescription": "Memory Phase Shedding Cycles",
"Counter": "0,1,2,3",
"EventCode": "0x2f",
"EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
"PerPkg": "1",
@@ -271,7 +231,6 @@
},
{
"BriefDescription": "Number of cores in C0",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
"PerPkg": "1",
@@ -280,7 +239,6 @@
},
{
"BriefDescription": "Number of cores in C0",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
"PerPkg": "1",
@@ -289,7 +247,6 @@
},
{
"BriefDescription": "Number of cores in C0",
"Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
"PerPkg": "1",
@@ -298,7 +255,6 @@
},
{
"BriefDescription": "External Prochot",
"Counter": "0,1,2,3",
"EventCode": "0xa",
"EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
"PerPkg": "1",
@@ -307,7 +263,6 @@
},
{
"BriefDescription": "Internal Prochot",
"Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
"PerPkg": "1",
@@ -316,17 +271,14 @@
},
{
"BriefDescription": "Total Core C State Transition Cycles",
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
"ExtSel": "1",
"PerPkg": "1",
"PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles Changing Voltage",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_P_VOLT_TRANS_CYCLES_CHANGE",
"PerPkg": "1",
@@ -335,7 +287,6 @@
},
{
"BriefDescription": "Cycles Decreasing Voltage",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_P_VOLT_TRANS_CYCLES_DECREASE",
"PerPkg": "1",
@@ -344,7 +295,6 @@
},
{
"BriefDescription": "Cycles Increasing Voltage",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_P_VOLT_TRANS_CYCLES_INCREASE",
"PerPkg": "1",
@@ -353,7 +303,6 @@
},
{
"BriefDescription": "VR Hot",
"Counter": "0,1,2,3",
"EventCode": "0x32",
"EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
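Two of the PCU descriptions above suggest simple derived numbers: because the pclk ticks at a fixed 800 MHz, UNC_P_CLOCKTICKS converts directly to wall time, and dividing UNC_P_FREQ_TRANS_CYCLES by it gives the fraction of time spent in frequency transitions. A minimal Python sketch with hypothetical readings:

# Wall time and frequency-transition fraction from PCU counters (hypothetical).
PCLK_HZ = 800e6               # fixed pclk rate, per the description above
pclk_ticks = 1_600_000_000    # UNC_P_CLOCKTICKS reading
freq_trans = 48_000_000       # UNC_P_FREQ_TRANS_CYCLES, same interval

elapsed_s = pclk_ticks / PCLK_HZ              # -> 2.00 s
trans_pct = 100 * freq_trans / pclk_ticks     # -> 3.0 %
print(f"elapsed: {elapsed_s:.2f} s, {trans_pct:.1f}% of time changing frequency")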
@@ -1,8 +1,6 @@
[
{
"BriefDescription": "Load misses in all DTLB levels that cause page walks.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
@@ -10,8 +8,6 @@
},
{
"BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
@@ -20,8 +16,6 @@
},
{
"BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
@@ -29,8 +23,6 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
@@ -39,8 +31,6 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause page walks.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
@@ -48,8 +38,6 @@
},
{
"BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -57,8 +45,6 @@
},
{
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
@@ -66,8 +52,6 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
"SampleAfterValue": "2000003",
@@ -75,8 +59,6 @@
},
{
"BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000003",
@@ -84,8 +66,6 @@
},
{
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xAE",
"EventName": "ITLB.ITLB_FLUSH",
"SampleAfterValue": "100007",
@@ -93,8 +73,6 @@
},
{
"BriefDescription": "Misses at all ITLB levels that cause page walks.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
@@ -102,8 +80,6 @@
},
{
"BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
@@ -111,8 +87,6 @@
},
{
"BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
@@ -120,8 +94,6 @@
},
{
"BriefDescription": "Cycles when PMH is busy with page walks.",
"Counter": "0,1,2,3",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_DURATION",
"PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
|
||||
@@ -130,8 +102,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "DTLB flush attempts of the thread-specific entries.",
|
||||
"Counter": "0,1,2,3",
|
||||
"CounterHTOff": "0,1,2,3,4,5,6,7",
|
||||
"EventCode": "0xBD",
|
||||
"EventName": "TLB_FLUSH.DTLB_THREAD",
|
||||
"SampleAfterValue": "100007",
|
||||
@@ -139,8 +109,6 @@
|
||||
},
|
||||
{
|
||||
"BriefDescription": "STLB flush attempts.",
|
||||
"Counter": "0,1,2,3",
|
||||
"CounterHTOff": "0,1,2,3,4,5,6,7",
|
||||
"EventCode": "0xBD",
|
||||
"EventName": "TLB_FLUSH.STLB_ANY",
|
||||
"SampleAfterValue": "100007",
|
||||
|
||||
Reference in New Issue
Block a user
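The DTLB descriptions above give enough to rough out TLB overhead: STLB hits cost approximately 7 cycles each, and WALK_DURATION directly counts cycles the PMH spent walking. A minimal Python sketch with hypothetical counts (the core-cycle total would come from a core event such as CPU_CLK_UNHALTED, measured over the same interval):

# Rough DTLB-miss overhead estimate from the events above (hypothetical values).
stlb_hits = 3_000_000          # DTLB_LOAD_MISSES.STLB_HIT
walk_cycles = 50_000_000       # DTLB_LOAD_MISSES.WALK_DURATION
core_cycles = 2_000_000_000    # unhalted core cycles, same interval

stlb_cost = stlb_hits * 7      # ~7 cycles per second-level TLB hit
pct = 100 * (stlb_cost + walk_cycles) / core_cycles
print(f"~{pct:.2f}% of core cycles lost to DTLB misses (rough estimate)")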