# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -x mir -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1102 -run-pass=machine-scheduler %s -o - | FileCheck %s

---
name: foo
tracksRegLiveness: true
liveins:
  - { reg: '$sgpr4_sgpr5', virtual-reg: '%0' }
body: |
  bb.0.entry:
    liveins: $sgpr4_sgpr5

    ; CHECK-LABEL: name: foo
    ; CHECK: liveins: $sgpr4_sgpr5
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
    ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
    ; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0:sgpr_128 = COPY [[S_MOV_B32_]]
    ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]]
    ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:sgpr_128 = COPY [[S_MOV_B32_]]
    ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:sgpr_128 = COPY [[S_MOV_B32_]]
    ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[COPY1]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 8)
    ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 0, 0 :: (dereferenceable invariant load (s64), align 16, addrspace 4)
    ; CHECK-NEXT: [[V_LSHRREV_B64_e64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64_e64 24, [[BUFFER_LOAD_DWORDX2_OFFSET]], implicit $exec
    ; CHECK-NEXT: undef [[COPY2:%[0-9]+]].lo16:vgpr_32 = COPY [[V_LSHRREV_B64_e64_]].lo16
    ; CHECK-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 16, [[COPY2]], implicit $exec
    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vreg_64 = COPY [[S_LOAD_DWORDX2_IMM]]
    ; CHECK-NEXT: [[V_PK_LSHLREV_B16_:%[0-9]+]]:vgpr_32 = V_PK_LSHLREV_B16 0, 8, 8, [[V_LSHLREV_B32_e64_]], 0, 0, 0, 0, 0, implicit $exec
    ; CHECK-NEXT: FLAT_STORE_DWORD [[COPY3]], [[V_PK_LSHLREV_B16_]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
    ; CHECK-NEXT: S_WAITCNT 0
    ; CHECK-NEXT: S_ENDPGM 0
    %0:sgpr_64(p4) = COPY killed $sgpr4_sgpr5
    %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed %0(p4), 0, 0 :: (dereferenceable invariant load (s64), align 16, addrspace 4)
    %2:sreg_32 = S_MOV_B32 0
    undef %3.sub0:sgpr_128 = COPY %2
    %3.sub1:sgpr_128 = COPY %2
    %3.sub2:sgpr_128 = COPY %2
    %3.sub3:sgpr_128 = COPY killed %2
    %4:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET killed %3, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 8)
    %5:vreg_64 = V_LSHRREV_B64_e64 24, killed %4, implicit $exec
    undef %6.lo16:vgpr_32 = COPY killed %5.lo16
    %7:vgpr_32 = V_LSHLREV_B32_e64 16, killed %6, implicit $exec
    %8:vgpr_32 = V_PK_LSHLREV_B16 0, 8, 8, killed %7, 0, 0, 0, 0, 0, implicit $exec
    %9:vreg_64 = COPY killed %1
    FLAT_STORE_DWORD killed %9, killed %8, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
    S_WAITCNT 0
    S_ENDPGM 0
...