[RISCV][NFC] Remove hasStdExtCOrZca #145139
Conversation
As of 20b5728, C always enables Zca, so the check `C || Zca` is equivalent to just checking for `Zca`. This replaces all uses of `HasStdExtCOrZca` with a new `HasStdExtZca` predicate (with the same assembler description, to avoid changes in error messages), and simplifies the C++ code that previously had to check for either C or Zca. The Subtarget function is just deprecated for the moment.
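For readers skimming the diff, here is a minimal standalone sketch (not LLVM code; the `Features` struct and `enable` helper are hypothetical stand-ins for the subtarget feature bits) of why the old `C || Zca` check and the new `Zca`-only check are interchangeable once enabling C always enables Zca:

```cpp
#include <cassert>

// Hypothetical stand-in for the subtarget feature bits.
struct Features {
  bool C = false;
  bool Zca = false;
};

// Mirrors the implication introduced in 20b5728: requesting C also turns on Zca.
Features enable(bool wantC, bool wantZca) {
  Features F;
  F.C = wantC;
  F.Zca = wantZca || wantC; // C implies Zca
  return F;
}

int main() {
  for (bool c : {false, true}) {
    for (bool zca : {false, true}) {
      Features F = enable(c, zca);
      // The old check (C || Zca) and the new check (Zca) always agree,
      // e.g. when choosing a 2-byte c.nop over a 4-byte nop.
      assert(((F.C || F.Zca) ? 2 : 4) == (F.Zca ? 2 : 4));
    }
  }
  return 0;
}
```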
@llvm/pr-subscribers-backend-risc-v

Author: Sam Elliott (lenary)

Changes

As of 20b5728, C always enables Zca, so the check `C || Zca` is equivalent to just checking for `Zca`.

This replaces all uses of `HasStdExtCOrZca` with a new `HasStdExtZca` predicate (with the same assembler description, to avoid changes in error messages), and simplifies the C++ code that previously had to check for either C or Zca.

The Subtarget function is just deprecated for the moment.

Patch is 33.49 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/145139.diff

16 Files Affected:
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 45946d3efe32e..9c99142914043 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -3296,9 +3296,6 @@ bool isValidInsnFormat(StringRef Format, const MCSubtargetInfo &STI) {
bool RISCVAsmParser::parseDirectiveInsn(SMLoc L) {
MCAsmParser &Parser = getParser();
- bool AllowC = getSTI().hasFeature(RISCV::FeatureStdExtC) ||
- getSTI().hasFeature(RISCV::FeatureStdExtZca);
-
// Expect instruction format as identifier.
StringRef Format;
SMLoc ErrorLoc = Parser.getTok().getLoc();
@@ -3342,7 +3339,8 @@ bool RISCVAsmParser::parseDirectiveInsn(SMLoc L) {
return Error(ErrorLoc, "encoding value does not fit into instruction");
}
- if (!AllowC && (EncodingDerivedLength == 2))
+ if (!getSTI().hasFeature(RISCV::FeatureStdExtZca) &&
+ (EncodingDerivedLength == 2))
return Error(ErrorLoc, "compressed instructions are not allowed");
if (getParser().parseEOL("invalid operand for instruction")) {
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index 186296944efde..a6c2d2fc29119 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -425,11 +425,9 @@ bool RISCVAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
Count -= 1;
}
- bool UseCompressedNop = STI->hasFeature(RISCV::FeatureStdExtC) ||
- STI->hasFeature(RISCV::FeatureStdExtZca);
- // The canonical nop on RVC is c.nop.
if (Count % 4 == 2) {
- OS.write(UseCompressedNop ? "\x01\0" : "\0\0", 2);
+ // The canonical nop with Zca is c.nop.
+ OS.write(STI->hasFeature(RISCV::FeatureStdExtZca) ? "\x01\0" : "\0\0", 2);
Count -= 2;
}
@@ -857,9 +855,7 @@ bool RISCVAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
if (!STI->hasFeature(RISCV::FeatureRelax))
return false;
- bool UseCompressedNop = STI->hasFeature(RISCV::FeatureStdExtC) ||
- STI->hasFeature(RISCV::FeatureStdExtZca);
- unsigned MinNopLen = UseCompressedNop ? 2 : 4;
+ unsigned MinNopLen = STI->hasFeature(RISCV::FeatureStdExtZca) ? 2 : 4;
if (AF.getAlignment() <= MinNopLen) {
return false;
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
index b50913be99226..ce0fbc0ac0654 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp
@@ -280,8 +280,7 @@ void RISCVMCCodeEmitter::expandLongCondBr(const MCInst &MI,
Opcode == RISCV::PseudoLongBNE || Opcode == RISCV::PseudoLongBEQ;
bool UseCompressedBr = false;
- if (IsEqTest && (STI.hasFeature(RISCV::FeatureStdExtC) ||
- STI.hasFeature(RISCV::FeatureStdExtZca))) {
+ if (IsEqTest && STI.hasFeature(RISCV::FeatureStdExtZca)) {
if (RISCV::X8 <= SrcReg1.id() && SrcReg1.id() <= RISCV::X15 &&
SrcReg2.id() == RISCV::X0) {
UseCompressedBr = true;
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCObjectFileInfo.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCObjectFileInfo.cpp
index ac7d3b785ab1b..6ad49926f5a36 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCObjectFileInfo.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCObjectFileInfo.cpp
@@ -19,9 +19,7 @@ using namespace llvm;
unsigned
RISCVMCObjectFileInfo::getTextSectionAlignment(const MCSubtargetInfo &STI) {
- bool RVC = STI.hasFeature(RISCV::FeatureStdExtC) ||
- STI.hasFeature(RISCV::FeatureStdExtZca);
- return RVC ? 2 : 4;
+ return STI.hasFeature(RISCV::FeatureStdExtZca) ? 2 : 4;
}
unsigned RISCVMCObjectFileInfo::getTextSectionAlignment() const {
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
index 747e462ee9d56..787a38ffb7be6 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp
@@ -538,8 +538,7 @@ InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI,
int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI,
bool CompressionCost, bool FreeZeroes) {
bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit);
- bool HasRVC = CompressionCost && (STI.hasFeature(RISCV::FeatureStdExtC) ||
- STI.hasFeature(RISCV::FeatureStdExtZca));
+ bool HasRVC = CompressionCost && STI.hasFeature(RISCV::FeatureStdExtZca);
int PlatRegSize = IsRV64 ? 64 : 32;
// Split the constant into platform register sized chunks, and calculate cost
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
index 6df8b182885b8..36558613d9172 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
@@ -115,8 +115,7 @@ void RISCVTargetStreamer::setTargetABI(RISCVABI::ABI ABI) {
}
void RISCVTargetStreamer::setFlagsFromFeatures(const MCSubtargetInfo &STI) {
- HasRVC = STI.hasFeature(RISCV::FeatureStdExtC) ||
- STI.hasFeature(RISCV::FeatureStdExtZca);
+ HasRVC = STI.hasFeature(RISCV::FeatureStdExtZca);
HasTSO = STI.hasFeature(RISCV::FeatureStdExtZtso);
}
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index d4d7de289a107..0e59861b8a786 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -132,7 +132,7 @@ class RISCVAsmPrinter : public AsmPrinter {
void RISCVAsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
const MachineInstr &MI) {
- unsigned NOPBytes = STI->hasStdExtCOrZca() ? 2 : 4;
+ unsigned NOPBytes = STI->hasStdExtZca() ? 2 : 4;
unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
auto &Ctx = OutStreamer.getContext();
@@ -165,7 +165,7 @@ void RISCVAsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
// [<def>], <id>, <numBytes>, <target>, <numArgs>
void RISCVAsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
const MachineInstr &MI) {
- unsigned NOPBytes = STI->hasStdExtCOrZca() ? 2 : 4;
+ unsigned NOPBytes = STI->hasStdExtZca() ? 2 : 4;
auto &Ctx = OutStreamer.getContext();
MCSymbol *MILabel = Ctx.createTempSymbol();
@@ -214,7 +214,7 @@ void RISCVAsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
void RISCVAsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
const MachineInstr &MI) {
- unsigned NOPBytes = STI->hasStdExtCOrZca() ? 2 : 4;
+ unsigned NOPBytes = STI->hasStdExtZca() ? 2 : 4;
StatepointOpers SOpers(&MI);
if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
@@ -292,7 +292,7 @@ void RISCVAsmPrinter::emitNTLHint(const MachineInstr *MI) {
NontemporalMode += 0b10;
MCInst Hint;
- if (STI->hasStdExtCOrZca() && STI->enableRVCHintInstrs())
+ if (STI->hasStdExtZca() && STI->enableRVCHintInstrs())
Hint.setOpcode(RISCV::C_ADD_HINT);
else
Hint.setOpcode(RISCV::ADD);
@@ -674,7 +674,7 @@ void RISCVAsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
} else {
// Adjust the offset for patchable-function-prefix. This assumes that
// patchable-function-prefix is the same for all functions.
- int NopSize = STI->hasStdExtCOrZca() ? 2 : 4;
+ int NopSize = STI->hasStdExtZca() ? 2 : 4;
int64_t PrefixNops = 0;
(void)MI.getMF()
->getFunction()
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 6df6368929dac..84a0b636782b1 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -385,6 +385,13 @@ def FeatureStdExtZca
"part of the C extension, excluding compressed "
"floating point loads/stores">,
RISCVExtensionBitmask<1, 2>;
+// FIXME: Update this message - Zca always implies C.
+def HasStdExtZca
+ : Predicate<"Subtarget->hasStdExtZca()">,
+ AssemblerPredicate<(any_of FeatureStdExtZca),
+ "'C' (Compressed Instructions) or "
+ "'Zca' (part of the C extension, excluding "
+ "compressed floating point loads/stores)">;
def FeatureStdExtC
: RISCVExtension<2, 0, "Compressed Instructions", [FeatureStdExtZca]>,
@@ -393,14 +400,6 @@ def HasStdExtC : Predicate<"Subtarget->hasStdExtC()">,
AssemblerPredicate<(all_of FeatureStdExtC),
"'C' (Compressed Instructions)">;
-
-def HasStdExtCOrZca
- : Predicate<"Subtarget->hasStdExtCOrZca()">,
- AssemblerPredicate<(any_of FeatureStdExtC, FeatureStdExtZca),
- "'C' (Compressed Instructions) or "
- "'Zca' (part of the C extension, excluding "
- "compressed floating point loads/stores)">;
-
def FeatureStdExtZcb
: RISCVExtension<1, 0, "Compressed basic bit manipulation instructions",
[FeatureStdExtZca]>,
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 59d99e613c0f4..a796c910bd449 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -1688,19 +1688,19 @@ static unsigned estimateFunctionSizeInBytes(const MachineFunction &MF,
//
// foo
// bne t5, t6, .rev_cond # `TII->getInstSizeInBytes(MI)` bytes
- // sd s11, 0(sp) # 4 bytes, or 2 bytes in RVC
+ // sd s11, 0(sp) # 4 bytes, or 2 bytes with Zca
// jump .restore, s11 # 8 bytes
// .rev_cond
// bar
- // j .dest_bb # 4 bytes, or 2 bytes in RVC
+ // j .dest_bb # 4 bytes, or 2 bytes with Zca
// .restore:
- // ld s11, 0(sp) # 4 bytes, or 2 bytes in RVC
+ // ld s11, 0(sp) # 4 bytes, or 2 bytes with Zca
// .dest:
// baz
if (MI.isConditionalBranch())
FnSize += TII.getInstSizeInBytes(MI);
if (MI.isConditionalBranch() || MI.isUnconditionalBranch()) {
- if (MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca())
+ if (MF.getSubtarget<RISCVSubtarget>().hasStdExtZca())
FnSize += 2 + 8 + 2 + 2;
else
FnSize += 4 + 8 + 4 + 4;
@@ -1865,7 +1865,7 @@ RISCVFrameLowering::getFirstSPAdjustAmount(const MachineFunction &MF) const {
// instructions be compressed, so try to adjust the amount to the largest
// offset that stack compression instructions accept when target supports
// compression instructions.
- if (STI.hasStdExtCOrZca()) {
+ if (STI.hasStdExtZca()) {
// The compression extensions may support the following instructions:
// riscv32: c.lwsp rd, offset[7:2] => 2^(6 + 2)
// c.swsp rs2, offset[7:2] => 2^(6 + 2)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 0c54101a11568..88813ed31229e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1598,7 +1598,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
}
// Function alignments.
- const Align FunctionAlignment(Subtarget.hasStdExtCOrZca() ? 2 : 4);
+ const Align FunctionAlignment(Subtarget.hasStdExtZca() ? 2 : 4);
setMinFunctionAlignment(FunctionAlignment);
// Set preferred alignments.
setPrefFunctionAlignment(Subtarget.getPrefFunctionAlignment());
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index d9ef911b9a32e..6e30ffce99c4d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -88,7 +88,7 @@ RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
#include "RISCVGenInstrInfo.inc"
MCInst RISCVInstrInfo::getNop() const {
- if (STI.hasStdExtCOrZca())
+ if (STI.hasStdExtZca())
return MCInstBuilder(RISCV::C_NOP);
return MCInstBuilder(RISCV::ADDI)
.addReg(RISCV::X0)
@@ -1717,7 +1717,7 @@ unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
if (!MI.memoperands_empty()) {
MachineMemOperand *MMO = *(MI.memoperands_begin());
if (STI.hasStdExtZihintntl() && MMO->isNonTemporal()) {
- if (STI.hasStdExtCOrZca() && STI.enableRVCHintInstrs()) {
+ if (STI.hasStdExtZca() && STI.enableRVCHintInstrs()) {
if (isCompressibleInst(MI, STI))
return 4; // c.ntl.all + c.load/c.store
return 6; // c.ntl.all + load/store
@@ -1738,7 +1738,7 @@ unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
case RISCV::PseudoMV_FPR16INX:
case RISCV::PseudoMV_FPR32INX:
// MV is always compressible to either c.mv or c.li rd, 0.
- return STI.hasStdExtCOrZca() ? 2 : 4;
+ return STI.hasStdExtZca() ? 2 : 4;
case TargetOpcode::STACKMAP:
// The upper bound for a stackmap intrinsic is the full length of its shadow
return StackMapOpers(&MI).getNumPatchBytes();
@@ -1765,7 +1765,7 @@ unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
return get(Opcode).getSize();
// Number of C.NOP or NOP
- return (STI.hasStdExtCOrZca() ? 2 : 4) * Num;
+ return (STI.hasStdExtZca() ? 2 : 4) * Num;
}
// XRay uses C.JAL + 21 or 33 C.NOP for each sled in RV32 and RV64,
// respectively.
@@ -3341,14 +3341,14 @@ RISCVInstrInfo::getOutliningCandidateInfo(
// Each RepeatedSequenceLoc is identical.
outliner::Candidate &Candidate = RepeatedSequenceLocs[0];
unsigned InstrSizeCExt =
- Candidate.getMF()->getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 2
- : 4;
+ Candidate.getMF()->getSubtarget<RISCVSubtarget>().hasStdExtZca() ? 2 : 4;
unsigned CallOverhead = 0, FrameOverhead = 0;
MachineOutlinerConstructionID MOCI = MachineOutlinerDefault;
if (Candidate.back().isReturn()) {
MOCI = MachineOutlinerTailCall;
// tail call = auipc + jalr in the worst case without linker relaxation.
+ // FIXME: This code suggests the JALR can be compressed - how?
CallOverhead = 4 + InstrSizeCExt;
// Using tail call we move ret instruction from caller to callee.
FrameOverhead = 0;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 70fad925cf070..aa1ebba567824 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1686,7 +1686,7 @@ defm : BccPat<SETGE, BGE>;
defm : BccPat<SETULT, BLTU>;
defm : BccPat<SETUGE, BGEU>;
-let Predicates = [HasStdExtCOrZca, OptForMinSize] in {
+let Predicates = [HasStdExtZca, OptForMinSize] in {
def : BrccCompressOpt<SETEQ, BEQ>;
def : BrccCompressOpt<SETNE, BNE>;
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoC.td b/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
index fd8591f5ab2d8..f7e8ee61ebba8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
@@ -293,7 +293,7 @@ class CA_ALU<bits<6> funct6, bits<2> funct2, string OpcodeStr>
// Instructions
//===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [X2] in
def C_ADDI4SPN : RVInst16CIW<0b000, 0b00, (outs GPRC:$rd),
@@ -342,7 +342,7 @@ def C_FLW : CLoad_ri<0b011, "c.flw", FPR32C, uimm7_lsb00>,
let Inst{5} = imm{6};
}
-let Predicates = [HasStdExtCOrZca, IsRV64] in
+let Predicates = [HasStdExtZca, IsRV64] in
def C_LD : CLoad_ri<0b011, "c.ld", GPRC, uimm8_lsb000>,
Sched<[WriteLDD, ReadMemBase]> {
bits<8> imm;
@@ -385,7 +385,7 @@ def C_FSW : CStore_rri<0b111, "c.fsw", FPR32C, uimm7_lsb00>,
let Inst{5} = imm{6};
}
-let Predicates = [HasStdExtCOrZca, IsRV64] in
+let Predicates = [HasStdExtZca, IsRV64] in
def C_SD : CStore_rri<0b111, "c.sd", GPRC, uimm8_lsb000>,
Sched<[WriteSTD, ReadStoreData, ReadMemBase]> {
bits<8> imm;
@@ -416,12 +416,12 @@ def PseudoC_ADDI_NOP : Pseudo<(outs GPRX0:$rd), (ins GPRX0:$rs1, immzero:$imm),
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCall = 1,
DecoderNamespace = "RV32Only", Defs = [X1],
- Predicates = [HasStdExtCOrZca, IsRV32] in
+ Predicates = [HasStdExtZca, IsRV32] in
def C_JAL : RVInst16CJ<0b001, 0b01, (outs), (ins bare_simm12_lsb0:$offset),
"c.jal", "$offset">, Sched<[WriteJal]>;
let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
- Predicates = [HasStdExtCOrZca, IsRV64] in
+ Predicates = [HasStdExtZca, IsRV64] in
def C_ADDIW : RVInst16CI<0b001, 0b01, (outs GPRNoX0:$rd_wb),
(ins GPRNoX0:$rd, simm6:$imm),
"c.addiw", "$rd, $imm">,
@@ -478,7 +478,7 @@ def C_OR : CA_ALU<0b100011, 0b10, "c.or">,
def C_AND : CA_ALU<0b100011, 0b11, "c.and">,
Sched<[WriteIALU, ReadIALU, ReadIALU]>;
-let Predicates = [HasStdExtCOrZca, IsRV64] in {
+let Predicates = [HasStdExtZca, IsRV64] in {
def C_SUBW : CA_ALU<0b100111, 0b00, "c.subw">,
Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
def C_ADDW : CA_ALU<0b100111, 0b01, "c.addw">,
@@ -528,7 +528,7 @@ def C_FLWSP : CStackLoad<0b011, "c.flwsp", FPR32, uimm8_lsb00>,
let Inst{3-2} = imm{7-6};
}
-let Predicates = [HasStdExtCOrZca, IsRV64] in
+let Predicates = [HasStdExtZca, IsRV64] in
def C_LDSP : CStackLoad<0b011, "c.ldsp", GPRNoX0, uimm9_lsb000>,
Sched<[WriteLDD, ReadMemBase]> {
let Inst{4-2} = imm{8-6};
@@ -588,7 +588,7 @@ def C_FSWSP : CStackStore<0b111, "c.fswsp", FPR32, uimm8_lsb00>,
let Inst{8-7} = imm{7-6};
}
-let Predicates = [HasStdExtCOrZca, IsRV64] in
+let Predicates = [HasStdExtZca, IsRV64] in
def C_SDSP : CStackStore<0b111, "c.sdsp", GPR, uimm9_lsb000>,
Sched<[WriteSTD, ReadStoreData, ReadMemBase]> {
let Inst{9-7} = imm{8-6};
@@ -602,13 +602,13 @@ def C_UNIMP : RVInst16<(outs), (ins), "c.unimp", "", [], InstFormatOther>,
let Inst{15-0} = 0;
}
-} // Predicates = [HasStdExtCOrZca]
+} // Predicates = [HasStdExtZca]
//===----------------------------------------------------------------------===//
// HINT Instructions
//===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtCOrZca, HasRVCHints], hasSideEffects = 0, mayLoad = 0,
+let Predicates = [HasStdExtZca, HasRVCHints], hasSideEffects = 0, mayLoad = 0,
mayStore = 0 in {
def C_NOP_HINT : RVInst16CI<0b000, 0b01, (outs), (ins simm6nonzero:$imm),
@@ -691,14 +691,14 @@ def C_SRAI64_HINT : RVInst16CB<0b100, 0b01, (outs GPRC:$rd),
let Inst{12} = 0;
}
-} // Predicates = [HasStdExtCOrZca, HasRVCHints], hasSideEffects = 0, mayLoad = 0,
+} // Predicates = [HasStdExtZca, HasRVCHints], hasSideEffects = 0, mayLoad = 0,
// mayStore = 0
//===----------------------------------------------------------------------===//
// Assembler Pseudo Instructions
//===----------------------------------------------------------------------===//
-let Predicates = [HasStdExtCOrZca, HasRVCHints] in {
+let Predicates = [HasStdExtZca, HasRVCHints] in {
// Just a different syntax for the c.nop hint: c.addi x0, simm6 vs c.nop simm6.
def : InstAlias<"c.addi x0, $imm", (C_NOP_HINT simm6nonzero:$imm), 0>;
}
@@ -711,14 +711,14 @@ def : InstAlias<"c.ntl.all", (C_ADD_HINT X0, X5)>;
} // Predicates = [HasStdExtC, HasRVCHints, HasStdExtZihintntl]
let EmitPriority = 0 in {
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
def : InstAlias<"c.lw $rd, (${rs1})", (C_LW GPRC:$rd, GPRCMem:$rs1, 0)>;
def : InstAlias<"c.sw $rs2, (${rs1})", (C_SW GPRC:$rs2, GPRCMem:$rs1, 0)>;
def : InstAlias<"c.lwsp $rd, (${rs1})", ...
[truncated]
LGTM