Commit edbaf19
[AMDGPU] Fix a potential integer overflow in GCNRegPressure when true16 is enabled (#144968)
Fixes SWDEV-537014.
Parent: 379a609

File tree

2 files changed: +72 -4 lines changed

llvm/lib/Target/AMDGPU/GCNRegPressure.cpp

Lines changed: 24 additions & 4 deletions
@@ -44,16 +44,19 @@ void GCNRegPressure::inc(unsigned Reg,
                          LaneBitmask PrevMask,
                          LaneBitmask NewMask,
                          const MachineRegisterInfo &MRI) {
-  if (SIRegisterInfo::getNumCoveredRegs(NewMask) ==
-      SIRegisterInfo::getNumCoveredRegs(PrevMask))
+  unsigned NewNumCoveredRegs = SIRegisterInfo::getNumCoveredRegs(NewMask);
+  unsigned PrevNumCoveredRegs = SIRegisterInfo::getNumCoveredRegs(PrevMask);
+  if (NewNumCoveredRegs == PrevNumCoveredRegs)
     return;
 
   int Sign = 1;
   if (NewMask < PrevMask) {
     std::swap(NewMask, PrevMask);
+    std::swap(NewNumCoveredRegs, PrevNumCoveredRegs);
     Sign = -1;
   }
-  assert(PrevMask < NewMask && "prev mask should always be lesser than new");
+  assert(PrevMask < NewMask && PrevNumCoveredRegs < NewNumCoveredRegs &&
+         "prev mask should always be lesser than new");
 
   const TargetRegisterClass *RC = MRI.getRegClass(Reg);
   const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo();
@@ -66,7 +69,24 @@ void GCNRegPressure::inc(unsigned Reg,
       Value[TupleIdx] += Sign * TRI->getRegClassWeight(RC).RegWeight;
     }
     // Pressure scales with number of new registers covered by the new mask.
-    Sign *= SIRegisterInfo::getNumCoveredRegs(~PrevMask & NewMask);
+    // Note when true16 is enabled, we can no longer safely use the following
+    // approach to calculate the difference in the number of 32-bit registers
+    // between two masks:
+    //
+    //   Sign *= SIRegisterInfo::getNumCoveredRegs(~PrevMask & NewMask);
+    //
+    // The issue is that the mask calculation `~PrevMask & NewMask` doesn't
+    // properly account for partial usage of a 32-bit register when dealing with
+    // 16-bit registers.
+    //
+    // Consider this example:
+    // Assume PrevMask = 0b0010 and NewMask = 0b1111. Here, the correct register
+    // usage difference should be 1, because even though PrevMask uses only half
+    // of a 32-bit register, it should still be counted as a full register use.
+    // However, the mask calculation yields `~PrevMask & NewMask = 0b1101`, and
+    // calling `getNumCoveredRegs` returns 2 instead of 1. This incorrect
+    // calculation can lead to integer overflow when Sign = -1.
+    Sign *= NewNumCoveredRegs - PrevNumCoveredRegs;
   }
   Value[RegKind] += Sign;
 }
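
To see the miscount end to end, here is a minimal standalone sketch, not LLVM code: the helper numCoveredRegs, the masks, and the unsigned pressure counter are illustrative assumptions. It models SIRegisterInfo::getNumCoveredRegs under the true16 lane layout, reproduces the 2-vs-1 delta from the comment above, and shows the resulting unsigned wraparound when Sign = -1. Build with -std=c++20 for std::popcount.

// Minimal sketch, not LLVM's implementation: numCoveredRegs models
// SIRegisterInfo::getNumCoveredRegs assuming lane-mask bit pair
// (2k, 2k+1) stands for the lo16/hi16 halves of 32-bit register k.
#include <bit>
#include <cassert>
#include <cstdint>

static unsigned numCoveredRegs(uint64_t Mask) {
  uint64_t HiHalves = Mask & 0xAAAAAAAAAAAAAAAAULL; // hi16 bits
  uint64_t Folded = (HiHalves >> 1) | Mask;         // fold hi16 onto lo16
  return std::popcount(Folded & 0x5555555555555555ULL);
}

int main() {
  uint64_t PrevMask = 0b0010; // only the hi16 half of register 0 is live
  uint64_t NewMask = 0b1111;  // both halves of registers 0 and 1 are live

  // Old formula: register 0 is counted again because its lo16 bit is "new".
  unsigned OldDelta = numCoveredRegs(~PrevMask & NewMask); // 0b1101 -> 2
  // Fixed formula: difference of covered-register counts.
  unsigned FixedDelta = numCoveredRegs(NewMask) - numCoveredRegs(PrevMask);
  assert(OldDelta == 2 && FixedDelta == 1);

  // On the shrinking path (Sign = -1) the old delta subtracts 2 from an
  // unsigned pressure counter that only ever grew by 1, so it wraps:
  unsigned Value = 1; // pressure contributed so far (assumed 32-bit unsigned)
  Value += -1 * static_cast<int>(OldDelta);
  assert(Value == 0xFFFFFFFFu); // wrapped; the overflow the patch fixes
  return 0;
}

The fix sidesteps the mask arithmetic entirely: since both covered-register counts are already computed, the delta NewNumCoveredRegs - PrevNumCoveredRegs is exact even when a mask covers only half of a 32-bit register.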
Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -x mir -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1102 -run-pass=machine-scheduler %s -o - | FileCheck %s
+
+---
+name: foo
+tracksRegLiveness: true
+liveins:
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%0' }
+body: |
+  bb.0.entry:
+    liveins: $sgpr4_sgpr5
+
+    ; CHECK-LABEL: name: foo
+    ; CHECK: liveins: $sgpr4_sgpr5
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
+    ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+    ; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub0:sgpr_128 = COPY [[S_MOV_B32_]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub2:sgpr_128 = COPY [[S_MOV_B32_]]
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]].sub3:sgpr_128 = COPY [[S_MOV_B32_]]
+    ; CHECK-NEXT: [[BUFFER_LOAD_DWORDX2_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET [[COPY1]], 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 8)
+    ; CHECK-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 0, 0 :: (dereferenceable invariant load (s64), align 16, addrspace 4)
+    ; CHECK-NEXT: [[V_LSHRREV_B64_e64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64_e64 24, [[BUFFER_LOAD_DWORDX2_OFFSET]], implicit $exec
+    ; CHECK-NEXT: undef [[COPY2:%[0-9]+]].lo16:vgpr_32 = COPY [[V_LSHRREV_B64_e64_]].lo16
+    ; CHECK-NEXT: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 16, [[COPY2]], implicit $exec
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vreg_64 = COPY [[S_LOAD_DWORDX2_IMM]]
+    ; CHECK-NEXT: [[V_PK_LSHLREV_B16_:%[0-9]+]]:vgpr_32 = V_PK_LSHLREV_B16 0, 8, 8, [[V_LSHLREV_B32_e64_]], 0, 0, 0, 0, 0, implicit $exec
+    ; CHECK-NEXT: FLAT_STORE_DWORD [[COPY3]], [[V_PK_LSHLREV_B16_]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
+    ; CHECK-NEXT: S_WAITCNT 0
+    ; CHECK-NEXT: S_ENDPGM 0
+    %0:sgpr_64(p4) = COPY killed $sgpr4_sgpr5
+    %1:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed %0(p4), 0, 0 :: (dereferenceable invariant load (s64), align 16, addrspace 4)
+    %2:sreg_32 = S_MOV_B32 0
+    undef %3.sub0:sgpr_128 = COPY %2
+    %3.sub1:sgpr_128 = COPY %2
+    %3.sub2:sgpr_128 = COPY %2
+    %3.sub3:sgpr_128 = COPY killed %2
+    %4:vreg_64 = BUFFER_LOAD_DWORDX2_OFFSET killed %3, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 1, addrspace 8)
+    %5:vreg_64 = V_LSHRREV_B64_e64 24, killed %4, implicit $exec
+    undef %6.lo16:vgpr_32 = COPY killed %5.lo16
+    %7:vgpr_32 = V_LSHLREV_B32_e64 16, killed %6, implicit $exec
+    %8:vgpr_32 = V_PK_LSHLREV_B16 0, 8, 8, killed %7, 0, 0, 0, 0, 0, implicit $exec
+    %9:vreg_64 = COPY killed %1
+    FLAT_STORE_DWORD killed %9, killed %8, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32))
+    S_WAITCNT 0
+    S_ENDPGM 0
+...
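
Why this MIR appears to reproduce the bug (a reading of the test, not an authoritative claim): the copy `undef %6.lo16:vgpr_32 = COPY killed %5.lo16` defines only the low 16-bit half of a 32-bit VGPR, so while the machine scheduler tracks register pressure on this gfx1102 (true16-capable) target, the lane masks for %6 cover a 32-bit register only partially. That is exactly the case in which the old `~PrevMask & NewMask` delta overcounts and, on the Sign = -1 path, drives the unsigned pressure counter below zero. With the patch applied, scheduling completes and the autogenerated CHECK lines pin the resulting MIR.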
