Kaby Lake i7-8550U (KbL)
I'm researching the behavior of the uops cache (DSB) and ran into behavior I can't explain.
Section 2.5.2.2 of the Intel Optimization Manual specifies (emphasis mine):
The Decoded ICache consists of 32 sets. Each set contains eight Ways. Each Way can hold up to six micro-ops.

- All micro-ops in a Way represent instructions which are statically contiguous in the code and have their EIPs within the same aligned 32-byte region.
- Up to three Ways may be dedicated to the same 32-byte aligned chunk, allowing a total of 18 micro-ops to be cached per 32-byte region of the original IA program.
- A non-conditional branch is the last micro-op in a Way.
CASE 1:
Consider the following routine:
uop.h
void inhibit_uops_cache(size_t);
uop.S
global inhibit_uops_cache ; export the symbol so the C driver can link against it
align 32
inhibit_uops_cache:
    mov edx, esi
    mov edx, esi
    mov edx, esi
    mov edx, esi
    mov edx, esi
    mov edx, esi
    jmp decrement_jmp_tgt
decrement_jmp_tgt:
    dec rdi
    ja inhibit_uops_cache ;ja is intentional to avoid Macro-fusion
    ret
To make sure that the code of the routine is actually 32-byte aligned, here is the disassembly:
0x555555554820 <inhibit_uops_cache>     mov    edx,esi
0x555555554822 <inhibit_uops_cache+2>   mov    edx,esi
0x555555554824 <inhibit_uops_cache+4>   mov    edx,esi
0x555555554826 <inhibit_uops_cache+6>   mov    edx,esi
0x555555554828 <inhibit_uops_cache+8>   mov    edx,esi
0x55555555482a <inhibit_uops_cache+10>  mov    edx,esi
0x55555555482c <inhibit_uops_cache+12>  jmp    0x55555555482e <decrement_jmp_tgt>
0x55555555482e <decrement_jmp_tgt>      dec    rdi
0x555555554831 <decrement_jmp_tgt+3>    ja     0x555555554820 <inhibit_uops_cache>
0x555555554833 <decrement_jmp_tgt+5>    ret
0x555555554834 <decrement_jmp_tgt+6>    nop
0x555555554835 <decrement_jmp_tgt+7>    nop
0x555555554836 <decrement_jmp_tgt+8>    nop
0x555555554837 <decrement_jmp_tgt+9>    nop
0x555555554838 <decrement_jmp_tgt+10>   nop
0x555555554839 <decrement_jmp_tgt+11>   nop
0x55555555483a <decrement_jmp_tgt+12>   nop
0x55555555483b <decrement_jmp_tgt+13>   nop
0x55555555483c <decrement_jmp_tgt+14>   nop
0x55555555483d <decrement_jmp_tgt+15>   nop
0x55555555483e <decrement_jmp_tgt+16>   nop
0x55555555483f <decrement_jmp_tgt+17>   nop             
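Note that the whole loop, from the first mov at 0x555555554820 through the ret at 0x555555554833, fits in a single aligned 32-byte region. Here is a small sanity check of that, plus one way-packing that is consistent with the quoted rules (my reading; the manual does not spell out the packing for this exact code):

#include <assert.h>
#include <stdint.h>

int main(void) {
    /* Addresses taken from the disassembly above. */
    uint64_t first = 0x555555554820ULL; /* inhibit_uops_cache */
    uint64_t last  = 0x555555554833ULL; /* ret                */

    /* Both ends map to the same aligned 32-byte region. */
    assert((first & ~0x1FULL) == (last & ~0x1FULL));

    /* One packing consistent with the quoted rules (my reading):
     *   way 1: mov x6           -> 6 uops, way full
     *   way 2: jmp              -> an unconditional branch must be
     *                              the last micro-op in a Way
     *   way 3: dec, ja, ret     -> 3 uops
     * 3 ways == the per-region limit, 10 uops <= 18.             */
    return 0;
}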
Running it as

#include "uop.h"

int main(void){
    inhibit_uops_cache(4096 * 4096 * 128L);
}

I got the following counters:
 Performance counter stats for './bin':
     6 431 201 748      idq.dsb_cycles                                                (56,91%)
    19 175 741 518      idq.dsb_uops                                                  (57,13%)
         7 866 687      idq.mite_uops                                                 (57,36%)
         3 954 421      idq.ms_uops                                                   (57,46%)
           560 459      dsb2mite_switches.penalty_cycles                                     (57,28%)
           884 486      frontend_retired.dsb_miss                                     (57,05%)
     6 782 598 787      cycles                                                        (56,82%)
       1,749000366 seconds time elapsed
       1,748985000 seconds user
       0,000000000 seconds sys
This is exactly what I expected: the vast majority of uops came from the uops cache, and their number matches my expectation:

mov edx, esi - 1 uop;
jmp imm      - 1 uop (near);
dec rdi      - 1 uop;
ja  imm      - 1 uop (near);

That is 9 uops per iteration and 4096 * 4096 * 128 = 2^31 iterations, so 4096 * 4096 * 128 * 9 = 19 327 352 832, approximately equal to the sum of the uop counters above: 19 175 741 518 + 7 866 687 + 3 954 421 = 19 187 562 626 (the events are multiplexed, which plausibly accounts for the small gap).
CASE 2:
Consider an implementation of inhibit_uops_cache that differs from CASE 1 by a single instruction being commented out:
align 32
inhibit_uops_cache:
    mov edx, esi
    mov edx, esi
    mov edx, esi
    mov edx, esi
    mov edx, esi
    ; mov edx, esi
    jmp decrement_jmp_tgt
decrement_jmp_tgt:
    dec rdi
    ja inhibit_uops_cache ;ja is intentional to avoid Macro-fusion
    ret
disas:
0x555555554820 <inhibit_uops_cache>     mov    edx,esi
0x555555554822 <inhibit_uops_cache+2>   mov    edx,esi
0x555555554824 <inhibit_uops_cache+4>   mov    edx,esi
0x555555554826 <inhibit_uops_cache+6>   mov    edx,esi
0x555555554828 <inhibit_uops_cache+8>   mov    edx,esi
0x55555555482a <inhibit_uops_cache+10>  jmp    0x55555555482c <decrement_jmp_tgt>
0x55555555482c <decrement_jmp_tgt>      dec    rdi
0x55555555482f <decrement_jmp_tgt+3>    ja     0x555555554820 <inhibit_uops_cache>
0x555555554831 <decrement_jmp_tgt+5>    ret
0x555555554832 <decrement_jmp_tgt+6>    nop
0x555555554833 <decrement_jmp_tgt+7>    nop
0x555555554834 <decrement_jmp_tgt+8>    nop
0x555555554835 <decrement_jmp_tgt+9>    nop
0x555555554836 <decrement_jmp_tgt+10>   nop
0x555555554837 <decrement_jmp_tgt+11>   nop
0x555555554838 <decrement_jmp_tgt+12>   nop
0x555555554839 <decrement_jmp_tgt+13>   nop
0x55555555483a <decrement_jmp_tgt+14>   nop
0x55555555483b <decrement_jmp_tgt+15>   nop
0x55555555483c <decrement_jmp_tgt+16>   nop
0x55555555483d <decrement_jmp_tgt+17>   nop
0x55555555483e <decrement_jmp_tgt+18>   nop
0x55555555483f <decrement_jmp_tgt+19>   nop                      
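For later reference, here is the same region and way bookkeeping for this variant, under the same assumptions as the CASE 1 sketch:

#include <assert.h>
#include <stdint.h>

int main(void) {
    /* Addresses taken from the disassembly above. */
    uint64_t first = 0x555555554820ULL; /* inhibit_uops_cache */
    uint64_t last  = 0x555555554831ULL; /* ret                */

    /* Still a single aligned 32-byte region... */
    assert((first & ~0x1FULL) == (last & ~0x1FULL));

    /* ...and a packing consistent with the quoted rules (my
     * reading) now needs fewer ways than CASE 1:
     *   way 1: mov x5, jmp  -> 6 uops, jmp last in the Way
     *   way 2: dec, ja, ret -> 3 uops
     * 2 ways <= 3, 9 uops <= 18, so by these rules the region
     * should be cacheable.                                     */
    return 0;
}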
Running it as

#include "uop.h"

int main(void){
    inhibit_uops_cache(4096 * 4096 * 128L);
}

I got the following counters:
 Performance counter stats for './bin':
     2 464 970 970      idq.dsb_cycles                                                (56,93%)
     6 197 024 207      idq.dsb_uops                                                  (57,01%)
    10 845 763 859      idq.mite_uops                                                 (57,19%)
         3 022 089      idq.ms_uops                                                   (57,38%)
           321 614      dsb2mite_switches.penalty_cycles                                     (57,35%)
     1 733 465 236      frontend_retired.dsb_miss                                     (57,16%)
     8 405 643 642      cycles                                                        (56,97%)
       2,117538141 seconds time elapsed
       2,117511000 seconds user
       0,000000000 seconds sys
These counters are completely unexpected. I expected all the uops to come from the DSB as before, since the routine still appears to satisfy the uops-cache requirements. Instead, about 64% of the uops came from the Legacy Decode Pipeline (MITE).

QUESTION: What's wrong with CASE 2? What counters should I look at to understand what's going on?
UPD: Following @PeterCordes' idea, I checked what happens when the unconditional branch target decrement_jmp_tgt is 32-byte aligned. Here is the result:
CASE 3:
Aligning the unconditional jump target to 32 bytes as follows:
align 32
inhibit_uops_cache:
    mov edx, esi
    mov edx, esi
    mov edx, esi
    mov edx, esi
    mov edx, esi
    ; mov edx, esi
    jmp decrement_jmp_tgt
align 32 ; align 16 does not change anything
decrement_jmp_tgt:
    dec rdi
    ja inhibit_uops_cache
    ret
disas:
0x555555554820 <inhibit_uops_cache>     mov    edx,esi
0x555555554822 <inhibit_uops_cache+2>   mov    edx,esi
0x555555554824 <inhibit_uops_cache+4>   mov    edx,esi
0x555555554826 <inhibit_uops_cache+6>   mov    edx,esi
0x555555554828 <inhibit_uops_cache+8>   mov    edx,esi
0x55555555482a <inhibit_uops_cache+10>  jmp    0x555555554840 <decrement_jmp_tgt>
#nops to meet the alignment
0x555555554840 <decrement_jmp_tgt>      dec    rdi
0x555555554843 <decrement_jmp_tgt+3>    ja     0x555555554820 <inhibit_uops_cache>
0x555555554845 <decrement_jmp_tgt+5>    ret                                              
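The region bookkeeping changes accordingly: the jump target now starts its own aligned 32-byte region, so the loop body spans two regions instead of one. A quick check under the same assumptions:

#include <assert.h>
#include <stdint.h>

int main(void) {
    /* Addresses taken from the disassembly above. */
    uint64_t loop_entry = 0x555555554820ULL; /* inhibit_uops_cache */
    uint64_t jmp_target = 0x555555554840ULL; /* decrement_jmp_tgt  */

    /* The target is itself 32-byte aligned... */
    assert((jmp_target & 0x1FULL) == 0);
    /* ...and lives in a different aligned 32-byte region
     * than the loop entry. */
    assert((loop_entry & ~0x1FULL) != (jmp_target & ~0x1FULL));
    return 0;
}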
and running it as

#include "uop.h"

int main(void){
    inhibit_uops_cache(4096 * 4096 * 128L);
}

I got the following counters:
 Performance counter stats for './bin':
     4 296 298 295      idq.dsb_cycles                                                (57,19%)
    17 145 751 147      idq.dsb_uops                                                  (57,32%)
        45 834 799      idq.mite_uops                                                 (57,32%)
         1 896 769      idq.ms_uops                                                   (57,32%)
           136 865      dsb2mite_switches.penalty_cycles                                     (57,04%)
           161 314      frontend_retired.dsb_miss                                     (56,90%)
     4 319 137 397      cycles                                                        (56,91%)
       1,096792233 seconds time elapsed
       1,096759000 seconds user
       0,000000000 seconds sys
The result is exactly what I expected: more than 99% of the uops came from the DSB.

Avg DSB uop delivery rate = 17 145 751 147 / 4 296 298 295 = 3.99

which is close to the peak front-end bandwidth of 4 uops per cycle.
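For comparison, the same ratio computed from the CASE 1 and CASE 2 counters quoted above (plain arithmetic on the reported numbers):

#include <stdio.h>

int main(void) {
    /* idq.dsb_uops / idq.dsb_cycles from the three runs above. */
    printf("CASE 1: %.2f uops/cycle\n", 19175741518.0 / 6431201748.0); /* ~2.98 */
    printf("CASE 2: %.2f uops/cycle\n",  6197024207.0 / 2464970970.0); /* ~2.51 */
    printf("CASE 3: %.2f uops/cycle\n", 17145751147.0 / 4296298295.0); /* ~3.99 */
    return 0;
}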