From c66eda46c0e54cf8877941e2f98ee22872c7992c Mon Sep 17 00:00:00 2001
From: Sam Elliott
Date: Fri, 20 Jun 2025 20:10:28 -0700
Subject: [PATCH] [RISCV][NFC] Remove hasStdExtCOrZca

As of 20b5728b7b1ccc4509a316efb270d46cc9526d69, C always enables Zca, so
the check `C || Zca` is equivalent to just checking for `Zca`.

This replaces all uses of `HasStdExtCOrZca` with a new `HasStdExtZca`
(with the same assembler description, to avoid changes in error
messages), and simplifies every place where C++ code needed to check
for either C or Zca.

For now, the Subtarget function is only deprecated, not removed.
---
 .../Target/RISCV/AsmParser/RISCVAsmParser.cpp |   6 +-
 .../RISCV/MCTargetDesc/RISCVAsmBackend.cpp    |  10 +-
 .../RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp |   3 +-
 .../MCTargetDesc/RISCVMCObjectFileInfo.cpp    |   4 +-
 .../Target/RISCV/MCTargetDesc/RISCVMatInt.cpp |   3 +-
 .../MCTargetDesc/RISCVTargetStreamer.cpp      |   3 +-
 llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp     |  10 +-
 llvm/lib/Target/RISCV/RISCVFeatures.td        |  15 ++-
 llvm/lib/Target/RISCV/RISCVFrameLowering.cpp  |  10 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |   2 +-
 llvm/lib/Target/RISCV/RISCVInstrInfo.cpp      |  12 +--
 llvm/lib/Target/RISCV/RISCVInstrInfo.td       |   2 +-
 llvm/lib/Target/RISCV/RISCVInstrInfoC.td      | 100 +++++++++---
 .../Target/RISCV/RISCVMakeCompressible.cpp    |   6 +-
 llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp   |   3 +-
 llvm/lib/Target/RISCV/RISCVSubtarget.h        |   6 +-
 16 files changed, 92 insertions(+), 103 deletions(-)

diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 45946d3efe32e..9c99142914043 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -3296,9 +3296,6 @@ bool isValidInsnFormat(StringRef Format, const MCSubtargetInfo &STI) {
 bool RISCVAsmParser::parseDirectiveInsn(SMLoc L) {
   MCAsmParser &Parser = getParser();
 
-  bool AllowC = getSTI().hasFeature(RISCV::FeatureStdExtC) ||
-                getSTI().hasFeature(RISCV::FeatureStdExtZca);
-
   // Expect instruction format as identifier.
   StringRef Format;
   SMLoc ErrorLoc = Parser.getTok().getLoc();
@@ -3342,7 +3339,8 @@ bool RISCVAsmParser::parseDirectiveInsn(SMLoc L) {
       return Error(ErrorLoc, "encoding value does not fit into instruction");
   }
 
-  if (!AllowC && (EncodingDerivedLength == 2))
+  if (!getSTI().hasFeature(RISCV::FeatureStdExtZca) &&
+      (EncodingDerivedLength == 2))
     return Error(ErrorLoc, "compressed instructions are not allowed");
 
   if (getParser().parseEOL("invalid operand for instruction")) {
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index 186296944efde..a6c2d2fc29119 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -425,11 +425,9 @@ bool RISCVAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
     Count -= 1;
   }
 
-  bool UseCompressedNop = STI->hasFeature(RISCV::FeatureStdExtC) ||
-                          STI->hasFeature(RISCV::FeatureStdExtZca);
-  // The canonical nop on RVC is c.nop.
   if (Count % 4 == 2) {
-    OS.write(UseCompressedNop ? "\x01\0" : "\0\0", 2);
+    // The canonical nop with Zca is c.nop.
+    OS.write(STI->hasFeature(RISCV::FeatureStdExtZca) ? "\x01\0" : "\0\0", 2);
"\x01\0" : "\0\0", 2); Count -= 2; } @@ -857,9 +855,7 @@ bool RISCVAsmBackend::shouldInsertExtraNopBytesForCodeAlign( if (!STI->hasFeature(RISCV::FeatureRelax)) return false; - bool UseCompressedNop = STI->hasFeature(RISCV::FeatureStdExtC) || - STI->hasFeature(RISCV::FeatureStdExtZca); - unsigned MinNopLen = UseCompressedNop ? 2 : 4; + unsigned MinNopLen = STI->hasFeature(RISCV::FeatureStdExtZca) ? 2 : 4; if (AF.getAlignment() <= MinNopLen) { return false; diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp index b50913be99226..ce0fbc0ac0654 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCCodeEmitter.cpp @@ -280,8 +280,7 @@ void RISCVMCCodeEmitter::expandLongCondBr(const MCInst &MI, Opcode == RISCV::PseudoLongBNE || Opcode == RISCV::PseudoLongBEQ; bool UseCompressedBr = false; - if (IsEqTest && (STI.hasFeature(RISCV::FeatureStdExtC) || - STI.hasFeature(RISCV::FeatureStdExtZca))) { + if (IsEqTest && STI.hasFeature(RISCV::FeatureStdExtZca)) { if (RISCV::X8 <= SrcReg1.id() && SrcReg1.id() <= RISCV::X15 && SrcReg2.id() == RISCV::X0) { UseCompressedBr = true; diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCObjectFileInfo.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCObjectFileInfo.cpp index ac7d3b785ab1b..6ad49926f5a36 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCObjectFileInfo.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMCObjectFileInfo.cpp @@ -19,9 +19,7 @@ using namespace llvm; unsigned RISCVMCObjectFileInfo::getTextSectionAlignment(const MCSubtargetInfo &STI) { - bool RVC = STI.hasFeature(RISCV::FeatureStdExtC) || - STI.hasFeature(RISCV::FeatureStdExtZca); - return RVC ? 2 : 4; + return STI.hasFeature(RISCV::FeatureStdExtZca) ? 2 : 4; } unsigned RISCVMCObjectFileInfo::getTextSectionAlignment() const { diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp index 747e462ee9d56..787a38ffb7be6 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVMatInt.cpp @@ -538,8 +538,7 @@ InstSeq generateTwoRegInstSeq(int64_t Val, const MCSubtargetInfo &STI, int getIntMatCost(const APInt &Val, unsigned Size, const MCSubtargetInfo &STI, bool CompressionCost, bool FreeZeroes) { bool IsRV64 = STI.hasFeature(RISCV::Feature64Bit); - bool HasRVC = CompressionCost && (STI.hasFeature(RISCV::FeatureStdExtC) || - STI.hasFeature(RISCV::FeatureStdExtZca)); + bool HasRVC = CompressionCost && STI.hasFeature(RISCV::FeatureStdExtZca); int PlatRegSize = IsRV64 ? 
 
   // Split the constant into platform register sized chunks, and calculate cost
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
index 6df8b182885b8..36558613d9172 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVTargetStreamer.cpp
@@ -115,8 +115,7 @@ void RISCVTargetStreamer::setTargetABI(RISCVABI::ABI ABI) {
 }
 
 void RISCVTargetStreamer::setFlagsFromFeatures(const MCSubtargetInfo &STI) {
-  HasRVC = STI.hasFeature(RISCV::FeatureStdExtC) ||
-           STI.hasFeature(RISCV::FeatureStdExtZca);
+  HasRVC = STI.hasFeature(RISCV::FeatureStdExtZca);
   HasTSO = STI.hasFeature(RISCV::FeatureStdExtZtso);
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index d4d7de289a107..0e59861b8a786 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -132,7 +132,7 @@ class RISCVAsmPrinter : public AsmPrinter {
 
 void RISCVAsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
                                     const MachineInstr &MI) {
-  unsigned NOPBytes = STI->hasStdExtCOrZca() ? 2 : 4;
+  unsigned NOPBytes = STI->hasStdExtZca() ? 2 : 4;
   unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
 
   auto &Ctx = OutStreamer.getContext();
@@ -165,7 +165,7 @@ void RISCVAsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
 // [<def>], <id>, <numBytes>, <target>, <numArgs>
 void RISCVAsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                                       const MachineInstr &MI) {
-  unsigned NOPBytes = STI->hasStdExtCOrZca() ? 2 : 4;
+  unsigned NOPBytes = STI->hasStdExtZca() ? 2 : 4;
 
   auto &Ctx = OutStreamer.getContext();
   MCSymbol *MILabel = Ctx.createTempSymbol();
@@ -214,7 +214,7 @@ void RISCVAsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
 
 void RISCVAsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
                                       const MachineInstr &MI) {
-  unsigned NOPBytes = STI->hasStdExtCOrZca() ? 2 : 4;
+  unsigned NOPBytes = STI->hasStdExtZca() ? 2 : 4;
 
   StatepointOpers SOpers(&MI);
   if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
@@ -292,7 +292,7 @@ void RISCVAsmPrinter::emitNTLHint(const MachineInstr *MI) {
     NontemporalMode += 0b10;
 
   MCInst Hint;
-  if (STI->hasStdExtCOrZca() && STI->enableRVCHintInstrs())
+  if (STI->hasStdExtZca() && STI->enableRVCHintInstrs())
     Hint.setOpcode(RISCV::C_ADD_HINT);
   else
     Hint.setOpcode(RISCV::ADD);
@@ -674,7 +674,7 @@ void RISCVAsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
   } else {
     // Adjust the offset for patchable-function-prefix. This assumes that
     // patchable-function-prefix is the same for all functions.
-    int NopSize = STI->hasStdExtCOrZca() ? 2 : 4;
+    int NopSize = STI->hasStdExtZca() ? 2 : 4;
     int64_t PrefixNops = 0;
     (void)MI.getMF()
         ->getFunction()
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 6df6368929dac..84a0b636782b1 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -385,6 +385,13 @@ def FeatureStdExtZca
                      "part of the C extension, excluding compressed "
                      "floating point loads/stores">,
       RISCVExtensionBitmask<1, 2>;
+// FIXME: Update this message - C always implies Zca.
+def HasStdExtZca
+    : Predicate<"Subtarget->hasStdExtZca()">,
+      AssemblerPredicate<(any_of FeatureStdExtZca),
+                         "'C' (Compressed Instructions) or "
+                         "'Zca' (part of the C extension, excluding "
+                         "compressed floating point loads/stores)">;
 
 def FeatureStdExtC
     : RISCVExtension<2, 0, "Compressed Instructions", [FeatureStdExtZca]>,
@@ -393,14 +400,6 @@ def HasStdExtC : Predicate<"Subtarget->hasStdExtC()">,
                  AssemblerPredicate<(all_of FeatureStdExtC),
                                     "'C' (Compressed Instructions)">;
-
-def HasStdExtCOrZca
-    : Predicate<"Subtarget->hasStdExtCOrZca()">,
-      AssemblerPredicate<(any_of FeatureStdExtC, FeatureStdExtZca),
-                         "'C' (Compressed Instructions) or "
-                         "'Zca' (part of the C extension, excluding "
-                         "compressed floating point loads/stores)">;
-
 def FeatureStdExtZcb
     : RISCVExtension<1, 0, "Compressed basic bit manipulation instructions",
                      [FeatureStdExtZca]>,
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index 59d99e613c0f4..a796c910bd449 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -1688,19 +1688,19 @@ static unsigned estimateFunctionSizeInBytes(const MachineFunction &MF,
       //
       //   foo
      //   bne t5, t6, .rev_cond # `TII->getInstSizeInBytes(MI)` bytes
-      //   sd s11, 0(sp)         # 4 bytes, or 2 bytes in RVC
+      //   sd s11, 0(sp)         # 4 bytes, or 2 bytes with Zca
       //   jump .restore, s11    # 8 bytes
       // .rev_cond
       //   bar
-      //   j .dest_bb            # 4 bytes, or 2 bytes in RVC
+      //   j .dest_bb            # 4 bytes, or 2 bytes with Zca
       // .restore:
-      //   ld s11, 0(sp)         # 4 bytes, or 2 bytes in RVC
+      //   ld s11, 0(sp)         # 4 bytes, or 2 bytes with Zca
       // .dest:
       //   baz
       if (MI.isConditionalBranch())
         FnSize += TII.getInstSizeInBytes(MI);
       if (MI.isConditionalBranch() || MI.isUnconditionalBranch()) {
-        if (MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca())
+        if (MF.getSubtarget<RISCVSubtarget>().hasStdExtZca())
           FnSize += 2 + 8 + 2 + 2;
         else
           FnSize += 4 + 8 + 4 + 4;
@@ -1865,7 +1865,7 @@ RISCVFrameLowering::getFirstSPAdjustAmount(const MachineFunction &MF) const {
   // instructions be compressed, so try to adjust the amount to the largest
   // offset that stack compression instructions accept when target supports
   // compression instructions.
-  if (STI.hasStdExtCOrZca()) {
+  if (STI.hasStdExtZca()) {
     // The compression extensions may support the following instructions:
     // riscv32: c.lwsp rd, offset[7:2] => 2^(6 + 2)
     //          c.swsp rs2, offset[7:2] => 2^(6 + 2)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 0c54101a11568..88813ed31229e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1598,7 +1598,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   }
 
   // Function alignments.
-  const Align FunctionAlignment(Subtarget.hasStdExtCOrZca() ? 2 : 4);
+  const Align FunctionAlignment(Subtarget.hasStdExtZca() ? 2 : 4);
   setMinFunctionAlignment(FunctionAlignment);
   // Set preferred alignments.
   setPrefFunctionAlignment(Subtarget.getPrefFunctionAlignment());
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index d9ef911b9a32e..6e30ffce99c4d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -88,7 +88,7 @@ RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
 #include "RISCVGenInstrInfo.inc"
 
 MCInst RISCVInstrInfo::getNop() const {
-  if (STI.hasStdExtCOrZca())
+  if (STI.hasStdExtZca())
     return MCInstBuilder(RISCV::C_NOP);
   return MCInstBuilder(RISCV::ADDI)
       .addReg(RISCV::X0)
@@ -1717,7 +1717,7 @@ unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
   if (!MI.memoperands_empty()) {
     MachineMemOperand *MMO = *(MI.memoperands_begin());
     if (STI.hasStdExtZihintntl() && MMO->isNonTemporal()) {
-      if (STI.hasStdExtCOrZca() && STI.enableRVCHintInstrs()) {
+      if (STI.hasStdExtZca() && STI.enableRVCHintInstrs()) {
         if (isCompressibleInst(MI, STI))
           return 4; // c.ntl.all + c.load/c.store
         return 6;   // c.ntl.all + load/store
@@ -1738,7 +1738,7 @@ unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
   case RISCV::PseudoMV_FPR16INX:
   case RISCV::PseudoMV_FPR32INX:
     // MV is always compressible to either c.mv or c.li rd, 0.
-    return STI.hasStdExtCOrZca() ? 2 : 4;
+    return STI.hasStdExtZca() ? 2 : 4;
   case TargetOpcode::STACKMAP:
     // The upper bound for a stackmap intrinsic is the full length of its shadow
     return StackMapOpers(&MI).getNumPatchBytes();
@@ -1765,7 +1765,7 @@ unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
       return get(Opcode).getSize();
 
     // Number of C.NOP or NOP
-    return (STI.hasStdExtCOrZca() ? 2 : 4) * Num;
+    return (STI.hasStdExtZca() ? 2 : 4) * Num;
   }
   // XRay uses C.JAL + 21 or 33 C.NOP for each sled in RV32 and RV64,
   // respectively.
@@ -3341,14 +3341,14 @@ RISCVInstrInfo::getOutliningCandidateInfo(
   // Each RepeatedSequenceLoc is identical.
   outliner::Candidate &Candidate = RepeatedSequenceLocs[0];
   unsigned InstrSizeCExt =
-      Candidate.getMF()->getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 2
-                                                                          : 4;
+      Candidate.getMF()->getSubtarget<RISCVSubtarget>().hasStdExtZca() ? 2 : 4;
   unsigned CallOverhead = 0, FrameOverhead = 0;
 
   MachineOutlinerConstructionID MOCI = MachineOutlinerDefault;
   if (Candidate.back().isReturn()) {
     MOCI = MachineOutlinerTailCall;
     // tail call = auipc + jalr in the worst case without linker relaxation.
+    // FIXME: This code suggests the JALR can be compressed - how?
     CallOverhead = 4 + InstrSizeCExt;
     // Using tail call we move ret instruction from caller to callee.
     FrameOverhead = 0;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 70fad925cf070..aa1ebba567824 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1686,7 +1686,7 @@ defm : BccPat;
 defm : BccPat;
 defm : BccPat;
 
-let Predicates = [HasStdExtCOrZca, OptForMinSize] in {
+let Predicates = [HasStdExtZca, OptForMinSize] in {
 def : BrccCompressOpt;
 def : BrccCompressOpt;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoC.td b/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
index fd8591f5ab2d8..f7e8ee61ebba8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
@@ -293,7 +293,7 @@ class CA_ALU<bits<6> funct6, bits<2> funct2, string OpcodeStr>
 // Instructions
 //===----------------------------------------------------------------------===//
 
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [X2] in
 def C_ADDI4SPN : RVInst16CIW<0b000, 0b00, (outs GPRC:$rd),
@@ -342,7 +342,7 @@ def C_FLW : CLoad_ri<0b011, "c.flw", FPR32C, uimm7_lsb00>,
   let Inst{5} = imm{6};
 }
 
-let Predicates = [HasStdExtCOrZca, IsRV64] in
+let Predicates = [HasStdExtZca, IsRV64] in
 def C_LD : CLoad_ri<0b011, "c.ld", GPRC, uimm8_lsb000>,
            Sched<[WriteLDD, ReadMemBase]> {
   bits<8> imm;
@@ -385,7 +385,7 @@ def C_FSW : CStore_rri<0b111, "c.fsw", FPR32C, uimm7_lsb00>,
   let Inst{5} = imm{6};
 }
 
-let Predicates = [HasStdExtCOrZca, IsRV64] in
+let Predicates = [HasStdExtZca, IsRV64] in
 def C_SD : CStore_rri<0b111, "c.sd", GPRC, uimm8_lsb000>,
            Sched<[WriteSTD, ReadStoreData, ReadMemBase]> {
   bits<8> imm;
@@ -416,12 +416,12 @@ def PseudoC_ADDI_NOP : Pseudo<(outs GPRX0:$rd), (ins GPRX0:$rs1, immzero:$imm),
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCall = 1,
     DecoderNamespace = "RV32Only", Defs = [X1],
-    Predicates = [HasStdExtCOrZca, IsRV32] in
+    Predicates = [HasStdExtZca, IsRV32] in
 def C_JAL : RVInst16CJ<0b001, 0b01, (outs), (ins bare_simm12_lsb0:$offset),
                        "c.jal", "$offset">,
             Sched<[WriteJal]>;
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
-    Predicates = [HasStdExtCOrZca, IsRV64] in
+    Predicates = [HasStdExtZca, IsRV64] in
 def C_ADDIW : RVInst16CI<0b001, 0b01, (outs GPRNoX0:$rd_wb),
                          (ins GPRNoX0:$rd, simm6:$imm),
                          "c.addiw", "$rd, $imm">,
@@ -478,7 +478,7 @@ def C_OR   : CA_ALU<0b100011, 0b10, "c.or">,
              Sched<[WriteIALU, ReadIALU, ReadIALU]>;
 def C_AND  : CA_ALU<0b100011, 0b11, "c.and">,
              Sched<[WriteIALU, ReadIALU, ReadIALU]>;
 
-let Predicates = [HasStdExtCOrZca, IsRV64] in {
+let Predicates = [HasStdExtZca, IsRV64] in {
 def C_SUBW : CA_ALU<0b100111, 0b00, "c.subw">,
              Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
 def C_ADDW : CA_ALU<0b100111, 0b01, "c.addw">,
@@ -528,7 +528,7 @@ def C_FLWSP : CStackLoad<0b011, "c.flwsp", FPR32, uimm8_lsb00>,
   let Inst{3-2} = imm{7-6};
 }
 
-let Predicates = [HasStdExtCOrZca, IsRV64] in
+let Predicates = [HasStdExtZca, IsRV64] in
 def C_LDSP : CStackLoad<0b011, "c.ldsp", GPRNoX0, uimm9_lsb000>,
              Sched<[WriteLDD, ReadMemBase]> {
   let Inst{4-2} = imm{8-6};
@@ -588,7 +588,7 @@ def C_FSWSP : CStackStore<0b111, "c.fswsp", FPR32, uimm8_lsb00>,
   let Inst{8-7} = imm{7-6};
 }
 
-let Predicates = [HasStdExtCOrZca, IsRV64] in
+let Predicates = [HasStdExtZca, IsRV64] in
 def C_SDSP : CStackStore<0b111, "c.sdsp", GPR, uimm9_lsb000>,
              Sched<[WriteSTD, ReadStoreData, ReadMemBase]> {
   let Inst{9-7} = imm{8-6};
@@ -602,13 +602,13 @@ def C_UNIMP : RVInst16<(outs), (ins), "c.unimp", "", [], InstFormatOther>,
   let Inst{15-0} = 0;
 }
 
-} // Predicates = [HasStdExtCOrZca]
+} // Predicates = [HasStdExtZca]
 
 //===----------------------------------------------------------------------===//
 // HINT Instructions
 //===----------------------------------------------------------------------===//
 
-let Predicates = [HasStdExtCOrZca, HasRVCHints], hasSideEffects = 0, mayLoad = 0,
+let Predicates = [HasStdExtZca, HasRVCHints], hasSideEffects = 0, mayLoad = 0,
     mayStore = 0 in {
 
 def C_NOP_HINT : RVInst16CI<0b000, 0b01, (outs), (ins simm6nonzero:$imm),
@@ -691,14 +691,14 @@ def C_SRAI64_HINT : RVInst16CB<0b100, 0b01, (outs GPRC:$rd),
   let Inst{12} = 0;
 }
 
-} // Predicates = [HasStdExtCOrZca, HasRVCHints], hasSideEffects = 0, mayLoad = 0,
+} // Predicates = [HasStdExtZca, HasRVCHints], hasSideEffects = 0, mayLoad = 0,
   // mayStore = 0
 
 //===----------------------------------------------------------------------===//
 // Assembler Pseudo Instructions
 //===----------------------------------------------------------------------===//
 
-let Predicates = [HasStdExtCOrZca, HasRVCHints] in {
+let Predicates = [HasStdExtZca, HasRVCHints] in {
 // Just a different syntax for the c.nop hint: c.addi x0, simm6 vs c.nop simm6.
 def : InstAlias<"c.addi x0, $imm", (C_NOP_HINT simm6nonzero:$imm), 0>;
 }
@@ -711,14 +711,14 @@ def : InstAlias<"c.ntl.all", (C_ADD_HINT X0, X5)>;
 } // Predicates = [HasStdExtC, HasRVCHints, HasStdExtZihintntl]
 
 let EmitPriority = 0 in {
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
 def : InstAlias<"c.lw $rd, (${rs1})", (C_LW GPRC:$rd, GPRCMem:$rs1, 0)>;
 def : InstAlias<"c.sw $rs2, (${rs1})", (C_SW GPRC:$rs2, GPRCMem:$rs1, 0)>;
 def : InstAlias<"c.lwsp $rd, (${rs1})", (C_LWSP GPRNoX0:$rd, SPMem:$rs1, 0)>;
 def : InstAlias<"c.swsp $rs2, (${rs1})", (C_SWSP GPR:$rs2, SPMem:$rs1, 0)>;
 }
 
-let Predicates = [HasStdExtCOrZca, IsRV64] in {
+let Predicates = [HasStdExtZca, IsRV64] in {
 def : InstAlias<"c.ld $rd, (${rs1})", (C_LD GPRC:$rd, GPRCMem:$rs1, 0)>;
 def : InstAlias<"c.sd $rs2, (${rs1})", (C_SD GPRC:$rs2, GPRCMem:$rs1, 0)>;
 def : InstAlias<"c.ldsp $rd, (${rs1})", (C_LDSP GPRNoX0:$rd, SPMem:$rs1, 0)>;
@@ -757,7 +757,7 @@ def AnyRegC : Operand<XLenVT> {
 
 // isCodeGenOnly = 1 to hide them from the tablegened assembly parser.
 let isCodeGenOnly = 1, hasSideEffects = 1, mayLoad = 1, mayStore = 1,
-    hasNoSchedulingInfo = 1, Predicates = [HasStdExtCOrZca] in {
+    hasNoSchedulingInfo = 1, Predicates = [HasStdExtZca] in {
 def InsnCR : DirectiveInsnCR<(outs AnyReg:$rd), (ins uimm2_opcode:$opcode,
                                                      uimm4:$funct4,
                                                      AnyReg:$rs2),
@@ -805,7 +805,7 @@ def InsnCJ : DirectiveInsnCJ<(outs), (ins uimm2_opcode:$opcode,
 // into a mnemonic to use as the key for the tablegened asm matcher table. The
 // parser will take care of creating these fake mnemonics and will only do it
 // for known formats.
-let EmitPriority = 0, Predicates = [HasStdExtCOrZca] in {
+let EmitPriority = 0, Predicates = [HasStdExtZca] in {
 def : InstAlias<".insn_cr $opcode, $funct4, $rd, $rs2",
                 (InsnCR AnyReg:$rd, uimm2_opcode:$opcode, uimm4:$funct4,
                         AnyReg:$rs2)>;
@@ -848,77 +848,77 @@ def : InstAlias<".insn_cj $opcode, $funct3, $imm11",
 // under the "RVC Instruction Set Listings" section of the ISA manual.
 
 // Quadrant 0
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
 def : CompressPat<(ADDI GPRC:$rd, SP:$rs1, uimm10_lsb00nonzero:$imm),
                   (C_ADDI4SPN GPRC:$rd, SP:$rs1, uimm10_lsb00nonzero:$imm)>;
-} // Predicates = [HasStdExtCOrZca]
+} // Predicates = [HasStdExtZca]
 
 let Predicates = [HasStdExtCOrZcd, HasStdExtD] in {
 def : CompressPat<(FLD FPR64C:$rd, GPRCMem:$rs1, uimm8_lsb000:$imm),
                   (C_FLD FPR64C:$rd, GPRCMem:$rs1, uimm8_lsb000:$imm)>;
 } // Predicates = [HasStdExtCOrZcd, HasStdExtD]
 
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
 def : CompressPat<(LW GPRC:$rd, GPRCMem:$rs1, uimm7_lsb00:$imm),
                   (C_LW GPRC:$rd, GPRCMem:$rs1, uimm7_lsb00:$imm)>;
 let isCompressOnly = true in
 def : CompressPat<(LW_INX GPRF32C:$rd, GPRCMem:$rs1, uimm7_lsb00:$imm),
                   (C_LW_INX GPRF32C:$rd, GPRCMem:$rs1, uimm7_lsb00:$imm)>;
-} // Predicates = [HasStdExtCOrZca]
+} // Predicates = [HasStdExtZca]
 
 let Predicates = [HasStdExtCOrZcfOrZce, HasStdExtF, IsRV32] in {
 def : CompressPat<(FLW FPR32C:$rd, GPRCMem:$rs1, uimm7_lsb00:$imm),
                   (C_FLW FPR32C:$rd, GPRCMem:$rs1, uimm7_lsb00:$imm)>;
 } // Predicates = [HasStdExtC, HasStdExtF, IsRV32]
 
-let Predicates = [HasStdExtCOrZca, IsRV64] in {
+let Predicates = [HasStdExtZca, IsRV64] in {
 def : CompressPat<(LD GPRC:$rd, GPRCMem:$rs1, uimm8_lsb000:$imm),
                   (C_LD GPRC:$rd, GPRCMem:$rs1, uimm8_lsb000:$imm)>;
-} // Predicates = [HasStdExtCOrZca, IsRV64]
+} // Predicates = [HasStdExtZca, IsRV64]
 
 let Predicates = [HasStdExtCOrZcd, HasStdExtD] in {
 def : CompressPat<(FSD FPR64C:$rs2, GPRCMem:$rs1, uimm8_lsb000:$imm),
                   (C_FSD FPR64C:$rs2, GPRCMem:$rs1, uimm8_lsb000:$imm)>;
 } // Predicates = [HasStdExtCOrZcd, HasStdExtD]
 
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
 def : CompressPat<(SW GPRC:$rs2, GPRCMem:$rs1, uimm7_lsb00:$imm),
                   (C_SW GPRC:$rs2, GPRCMem:$rs1, uimm7_lsb00:$imm)>;
 let isCompressOnly = true in
 def : CompressPat<(SW_INX GPRF32C:$rs2, GPRCMem:$rs1, uimm7_lsb00:$imm),
                   (C_SW_INX GPRF32C:$rs2, GPRCMem:$rs1, uimm7_lsb00:$imm)>;
-} // Predicates = [HasStdExtCOrZca]
+} // Predicates = [HasStdExtZca]
 
 let Predicates = [HasStdExtCOrZcfOrZce, HasStdExtF, IsRV32] in {
 def : CompressPat<(FSW FPR32C:$rs2, GPRCMem:$rs1, uimm7_lsb00:$imm),
                   (C_FSW FPR32C:$rs2, GPRCMem:$rs1, uimm7_lsb00:$imm)>;
 } // Predicates = [HasStdExtC, HasStdExtF, IsRV32]
 
-let Predicates = [HasStdExtCOrZca, IsRV64] in {
+let Predicates = [HasStdExtZca, IsRV64] in {
 def : CompressPat<(SD GPRC:$rs2, GPRCMem:$rs1, uimm8_lsb000:$imm),
                   (C_SD GPRC:$rs2, GPRCMem:$rs1, uimm8_lsb000:$imm)>;
-} // Predicates = [HasStdExtCOrZca, IsRV64]
+} // Predicates = [HasStdExtZca, IsRV64]
 
 // Quadrant 1
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
 def : CompressPat<(ADDI X0, X0, 0), (C_NOP)>;
 def : CompressPat<(ADDI GPRNoX0:$rs1, GPRNoX0:$rs1, simm6nonzero:$imm),
                   (C_ADDI GPRNoX0:$rs1, simm6nonzero:$imm)>;
-} // Predicates = [HasStdExtCOrZca]
+} // Predicates = [HasStdExtZca]
 
-let Predicates = [HasStdExtCOrZca, IsRV32] in {
+let Predicates = [HasStdExtZca, IsRV32] in {
 def : CompressPat<(JAL X1, bare_simm12_lsb0:$offset),
                   (C_JAL bare_simm12_lsb0:$offset)>;
-} // Predicates = [HasStdExtCOrZca, IsRV32]
+} // Predicates = [HasStdExtZca, IsRV32]
 
-let Predicates = [HasStdExtCOrZca, IsRV64] in {
+let Predicates = [HasStdExtZca, IsRV64] in {
 def : CompressPat<(ADDIW GPRNoX0:$rs1, GPRNoX0:$rs1, simm6:$imm),
                   (C_ADDIW GPRNoX0:$rs1, simm6:$imm)>;
-} // Predicates = [HasStdExtCOrZca, IsRV64]
+} // Predicates = [HasStdExtZca, IsRV64]
 
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
 def : CompressPat<(ADDI GPRNoX0:$rd, X0, simm6:$imm),
                   (C_LI GPRNoX0:$rd, simm6:$imm)>;
 def : CompressPat<(ADDI X2, X2, simm10_lsb0000nonzero:$imm),
@@ -948,9 +948,9 @@ def : CompressPat<(AND GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
 let isCompressOnly = true in
 def : CompressPat<(AND GPRC:$rs1, GPRC:$rs2, GPRC:$rs1),
                   (C_AND GPRC:$rs1, GPRC:$rs2)>;
-} // Predicates = [HasStdExtCOrZca]
+} // Predicates = [HasStdExtZca]
 
-let Predicates = [HasStdExtCOrZca, IsRV64] in {
+let Predicates = [HasStdExtZca, IsRV64] in {
 let isCompressOnly = true in
 def : CompressPat<(ADDIW GPRNoX0:$rd, X0, simm6:$imm),
                   (C_LI GPRNoX0:$rd, simm6:$imm)>;
@@ -961,9 +961,9 @@ def : CompressPat<(ADDW GPRC:$rs1, GPRC:$rs1, GPRC:$rs2),
 let isCompressOnly = true in
 def : CompressPat<(ADDW GPRC:$rs1, GPRC:$rs2, GPRC:$rs1),
                   (C_ADDW GPRC:$rs1, GPRC:$rs2)>;
-} // Predicates = [HasStdExtCOrZca, IsRV64]
+} // Predicates = [HasStdExtZca, IsRV64]
 
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
 def : CompressPat<(JAL X0, bare_simm12_lsb0:$offset),
                   (C_J bare_simm12_lsb0:$offset)>;
 def : CompressPat<(BEQ GPRC:$rs1, X0, bare_simm9_lsb0:$imm),
@@ -976,39 +976,39 @@ def : CompressPat<(BNE GPRC:$rs1, X0, bare_simm9_lsb0:$imm),
 let isCompressOnly = true in
 def : CompressPat<(BNE X0, GPRC:$rs1, bare_simm9_lsb0:$imm),
                   (C_BNEZ GPRC:$rs1, bare_simm9_lsb0:$imm)>;
-} // Predicates = [HasStdExtCOrZca]
+} // Predicates = [HasStdExtZca]
 
 // Quadrant 2
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
 def : CompressPat<(SLLI GPRNoX0:$rs1, GPRNoX0:$rs1, uimmlog2xlennonzero:$imm),
                   (C_SLLI GPRNoX0:$rs1, uimmlog2xlennonzero:$imm)>;
-} // Predicates = [HasStdExtCOrZca]
+} // Predicates = [HasStdExtZca]
 
 let Predicates = [HasStdExtCOrZcd, HasStdExtD] in {
 def : CompressPat<(FLD FPR64:$rd, SPMem:$rs1, uimm9_lsb000:$imm),
                   (C_FLDSP FPR64:$rd, SPMem:$rs1, uimm9_lsb000:$imm)>;
 } // Predicates = [HasStdExtCOrZcd, HasStdExtD]
 
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
 def : CompressPat<(LW GPRNoX0:$rd, SPMem:$rs1, uimm8_lsb00:$imm),
                   (C_LWSP GPRNoX0:$rd, SPMem:$rs1, uimm8_lsb00:$imm)>;
 let isCompressOnly = true in
 def : CompressPat<(LW_INX GPRF32NoX0:$rd, SPMem:$rs1, uimm8_lsb00:$imm),
                   (C_LWSP_INX GPRF32NoX0:$rd, SPMem:$rs1, uimm8_lsb00:$imm)>;
-} // Predicates = [HasStdExtCOrZca]
+} // Predicates = [HasStdExtZca]
 
 let Predicates = [HasStdExtCOrZcfOrZce, HasStdExtF, IsRV32] in {
 def : CompressPat<(FLW FPR32:$rd, SPMem:$rs1, uimm8_lsb00:$imm),
                   (C_FLWSP FPR32:$rd, SPMem:$rs1, uimm8_lsb00:$imm)>;
 } // Predicates = [HasStdExtC, HasStdExtF, IsRV32]
 
-let Predicates = [HasStdExtCOrZca, IsRV64] in {
+let Predicates = [HasStdExtZca, IsRV64] in {
 def : CompressPat<(LD GPRNoX0:$rd, SPMem:$rs1, uimm9_lsb000:$imm),
                   (C_LDSP GPRNoX0:$rd, SPMem:$rs1, uimm9_lsb000:$imm)>;
-} // Predicates = [HasStdExtCOrZca, IsRV64]
+} // Predicates = [HasStdExtZca, IsRV64]
 
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
 def : CompressPat<(JALR X0, GPRNoX0:$rs1, 0),
                   (C_JR GPRNoX0:$rs1)>;
 let isCompressOnly = true in {
@@ -1028,28 +1028,28 @@ def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs1, GPRNoX0:$rs2),
 let isCompressOnly = true in
 def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs2, GPRNoX0:$rs1),
                   (C_ADD GPRNoX0:$rs1, GPRNoX0:$rs2)>;
-} // Predicates = [HasStdExtCOrZca]
+} // Predicates = [HasStdExtZca]
 
 let Predicates = [HasStdExtCOrZcd, HasStdExtD] in {
 def : CompressPat<(FSD FPR64:$rs2, SPMem:$rs1, uimm9_lsb000:$imm),
                   (C_FSDSP FPR64:$rs2, SPMem:$rs1, uimm9_lsb000:$imm)>;
 } // Predicates = [HasStdExtCOrZcd, HasStdExtD]
 
-let Predicates = [HasStdExtCOrZca] in {
+let Predicates = [HasStdExtZca] in {
 def : CompressPat<(SW GPR:$rs2, SPMem:$rs1, uimm8_lsb00:$imm),
                   (C_SWSP GPR:$rs2, SPMem:$rs1, uimm8_lsb00:$imm)>;
 let isCompressOnly = true in
 def : CompressPat<(SW_INX GPRF32:$rs2, SPMem:$rs1, uimm8_lsb00:$imm),
                   (C_SWSP_INX GPRF32:$rs2, SPMem:$rs1, uimm8_lsb00:$imm)>;
-} // Predicates = [HasStdExtCOrZca]
+} // Predicates = [HasStdExtZca]
 
 let Predicates = [HasStdExtCOrZcfOrZce, HasStdExtF, IsRV32] in {
 def : CompressPat<(FSW FPR32:$rs2, SPMem:$rs1, uimm8_lsb00:$imm),
                   (C_FSWSP FPR32:$rs2, SPMem:$rs1, uimm8_lsb00:$imm)>;
 } // Predicates = [HasStdExtC, HasStdExtF, IsRV32]
 
-let Predicates = [HasStdExtCOrZca, IsRV64] in {
+let Predicates = [HasStdExtZca, IsRV64] in {
 def : CompressPat<(SD GPR:$rs2, SPMem:$rs1, uimm9_lsb000:$imm),
                   (C_SDSP GPR:$rs2, SPMem:$rs1, uimm9_lsb000:$imm)>;
-} // Predicates = [HasStdExtCOrZca, IsRV64]
+} // Predicates = [HasStdExtZca, IsRV64]
diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
index 2c409b98e207b..f8d33ae8d24ca 100644
--- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
@@ -207,7 +207,7 @@ static bool isCompressibleLoad(const MachineInstr &MI) {
   case RISCV::LW:
   case RISCV::LW_INX:
   case RISCV::LD:
-    return STI.hasStdExtCOrZca();
+    return STI.hasStdExtZca();
   case RISCV::LD_RV32:
     return STI.hasStdExtZclsd();
   case RISCV::FLW:
@@ -231,7 +231,7 @@ static bool isCompressibleStore(const MachineInstr &MI) {
   case RISCV::SW:
   case RISCV::SW_INX:
   case RISCV::SD:
-    return STI.hasStdExtCOrZca();
+    return STI.hasStdExtZca();
   case RISCV::SD_RV32:
     return STI.hasStdExtZclsd();
   case RISCV::FSW:
@@ -415,7 +415,7 @@ bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
   const RISCVInstrInfo &TII = *STI.getInstrInfo();
 
   // This optimization only makes sense if compressed instructions are emitted.
-  if (!STI.hasStdExtCOrZca())
+  if (!STI.hasStdExtZca())
     return false;
 
   for (MachineBasicBlock &MBB : Fn) {
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 7fdbf4be1ed12..540412366026b 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -880,8 +880,7 @@ void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
 
 unsigned
 RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
-  return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() &&
-                 !DisableCostPerUse
+  return MF.getSubtarget<RISCVSubtarget>().hasStdExtZca() && !DisableCostPerUse
              ? 1
             : 0;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 04c7ca7d0572b..4f560cca22dff 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -24,6 +24,7 @@
 #include "llvm/CodeGen/MachineScheduler.h"
 #include "llvm/CodeGen/TargetSubtargetInfo.h"
 #include "llvm/IR/DataLayout.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Target/TargetMachine.h"
 #include <bitset>
 
@@ -168,7 +169,8 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
   bool GETTER() const { return ATTRIBUTE; }
 #include "RISCVGenSubtargetInfo.inc"
 
-  bool hasStdExtCOrZca() const { return HasStdExtC || HasStdExtZca; }
+  LLVM_DEPRECATED("Now Equivalent to hasStdExtZca", "hasStdExtZca")
+  bool hasStdExtCOrZca() const { return HasStdExtZca; }
   bool hasStdExtCOrZcd() const { return HasStdExtC || HasStdExtZcd; }
   bool hasStdExtCOrZcfOrZce() const {
     return HasStdExtC || HasStdExtZcf || HasStdExtZce;
@@ -186,7 +188,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
 
   bool hasConditionalMoveFusion() const {
     // Do we support fusing a branch+mv or branch+c.mv as a conditional move.
-    return (hasConditionalCompressedMoveFusion() && hasStdExtCOrZca()) ||
+    return (hasConditionalCompressedMoveFusion() && hasStdExtZca()) ||
            hasShortForwardBranchOpt();
   }