diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index ff1ae2d9d792c..73be25ba92b7e 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -23,7 +23,7 @@ use rustc_codegen_ssa::traits::{
 use rustc_middle::bug;
 #[cfg(feature = "master")]
 use rustc_middle::ty::layout::FnAbiOf;
-use rustc_middle::ty::layout::{HasTypingEnv, LayoutOf};
+use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::{self, Instance, Ty};
 use rustc_span::{Span, Symbol, sym};
 use rustc_target::callconv::{ArgAbi, PassMode};
@@ -205,21 +205,10 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
         span: Span,
     ) -> Result<(), Instance<'tcx>> {
         let tcx = self.tcx;
-        let callee_ty = instance.ty(tcx, self.typing_env());
-        let (def_id, fn_args) = match *callee_ty.kind() {
-            ty::FnDef(def_id, fn_args) => (def_id, fn_args),
-            _ => bug!("expected fn item type, found {}", callee_ty),
-        };
-
-        let sig = callee_ty.fn_sig(tcx);
-        let sig = tcx.normalize_erasing_late_bound_regions(self.typing_env(), sig);
-        let arg_tys = sig.inputs();
-        let ret_ty = sig.output();
-        let name = tcx.item_name(def_id);
+        let name = tcx.item_name(instance.def_id());
         let name_str = name.as_str();
-
-        let llret_ty = self.layout_of(ret_ty).gcc_type(self);
+        let fn_args = instance.args;
 
         let simple = get_simple_intrinsic(self, name);
         let simple_func = get_simple_function(self, name);
@@ -320,8 +309,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
             | sym::rotate_right
             | sym::saturating_add
             | sym::saturating_sub => {
-                let ty = arg_tys[0];
-                match int_type_width_signed(ty, self) {
+                match int_type_width_signed(args[0].layout.ty, self) {
                     Some((width, signed)) => match name {
                         sym::ctlz | sym::cttz => {
                             let func = self.current_func.borrow().expect("func");
@@ -400,7 +388,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
                         tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                             span,
                             name,
-                            ty,
+                            ty: args[0].layout.ty,
                         });
                         return Ok(());
                     }
@@ -492,7 +480,14 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
             }
 
             _ if name_str.starts_with("simd_") => {
-                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+                match generic_simd_intrinsic(
+                    self,
+                    name,
+                    args,
+                    result.layout.ty,
+                    result.layout.gcc_type(self),
+                    span,
+                ) {
                     Ok(value) => value,
                     Err(()) => return Ok(()),
                 }
@@ -503,13 +498,10 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
         };
 
         if result.layout.ty.is_bool() {
-            OperandRef::from_immediate_or_packed_pair(self, value, result.layout)
-                .val
-                .store(self, result);
+            let val = self.from_immediate(value);
+            self.store_to_place(val, result.val);
         } else if !result.layout.ty.is_unit() {
-            let ptr_llty = self.type_ptr_to(result.layout.gcc_type(self));
-            let ptr = self.pointercast(result.val.llval, ptr_llty);
-            self.store(value, ptr, result.val.align);
+            self.store_to_place(value, result.val);
         }
         Ok(())
     }
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
index b897d07924914..82ef0d0b13a45 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -28,7 +28,6 @@ use crate::context::CodegenCx;
 pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
     bx: &mut Builder<'a, 'gcc, 'tcx>,
     name: Symbol,
-    callee_ty: Ty<'tcx>,
     args: &[OperandRef<'tcx, RValue<'gcc>>],
     ret_ty: Ty<'tcx>,
     llret_ty: Type<'gcc>,
@@ -54,24 +53,17 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         };
     }
 
-    let tcx = bx.tcx();
-    let sig = tcx.normalize_erasing_late_bound_regions(
-        ty::TypingEnv::fully_monomorphized(),
-        callee_ty.fn_sig(tcx),
-    );
-    let arg_tys = sig.inputs();
-
     if name == sym::simd_select_bitmask {
         require_simd!(
-            arg_tys[1],
-            InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
+            args[1].layout.ty,
+            InvalidMonomorphization::SimdArgument { span, name, ty: args[1].layout.ty }
         );
-        let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+        let (len, _) = args[1].layout.ty.simd_size_and_type(bx.tcx());
 
         let expected_int_bits = (len.max(8) - 1).next_power_of_two();
         let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
 
-        let mask_ty = arg_tys[0];
+        let mask_ty = args[0].layout.ty;
         let mut mask = match *mask_ty.kind() {
             ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
             ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
@@ -121,8 +113,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
     }
 
     // every intrinsic below takes a SIMD vector as its first argument
-    require_simd!(arg_tys[0], InvalidMonomorphization::SimdInput { span, name, ty: arg_tys[0] });
-    let in_ty = arg_tys[0];
+    require_simd!(
+        args[0].layout.ty,
+        InvalidMonomorphization::SimdInput { span, name, ty: args[0].layout.ty }
+    );
+    let in_ty = args[0].layout.ty;
 
     let comparison = match name {
         sym::simd_eq => Some(BinOp::Eq),
@@ -134,7 +129,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         _ => None,
     };
 
-    let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
+    let (in_len, in_elem) = args[0].layout.ty.simd_size_and_type(bx.tcx());
     if let Some(cmp_op) = comparison {
         require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
@@ -401,13 +396,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
     #[cfg(feature = "master")]
     if name == sym::simd_insert || name == sym::simd_insert_dyn {
         require!(
-            in_elem == arg_tys[2],
+            in_elem == args[2].layout.ty,
             InvalidMonomorphization::InsertedType {
                 span,
                 name,
                 in_elem,
                 in_ty,
-                out_ty: arg_tys[2]
+                out_ty: args[2].layout.ty
             }
         );
@@ -439,10 +434,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         let m_elem_ty = in_elem;
         let m_len = in_len;
         require_simd!(
-            arg_tys[1],
-            InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
+            args[1].layout.ty,
+            InvalidMonomorphization::SimdArgument { span, name, ty: args[1].layout.ty }
         );
-        let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+        let (v_len, _) = args[1].layout.ty.simd_size_and_type(bx.tcx());
         require!(
             m_len == v_len,
             InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
@@ -911,18 +906,18 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         // All types must be simd vector types
         require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
         require_simd!(
-            arg_tys[1],
-            InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
+            args[1].layout.ty,
+            InvalidMonomorphization::SimdSecond { span, name, ty: args[1].layout.ty }
         );
         require_simd!(
-            arg_tys[2],
-            InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
+            args[2].layout.ty,
+            InvalidMonomorphization::SimdThird { span, name, ty: args[2].layout.ty }
         );
         require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
 
         // Of the same length:
-        let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
-        let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
+        let (out_len, _) = args[1].layout.ty.simd_size_and_type(bx.tcx());
+        let (out_len2, _) = args[2].layout.ty.simd_size_and_type(bx.tcx());
         require!(
             in_len == out_len,
             InvalidMonomorphization::SecondArgumentLength {
@@ -930,7 +925,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                 name,
                 in_len,
                 in_ty,
-                arg_ty: arg_tys[1],
+                arg_ty: args[1].layout.ty,
                 out_len
             }
         );
@@ -941,7 +936,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                 name,
                 in_len,
                 in_ty,
-                arg_ty: arg_tys[2],
+                arg_ty: args[2].layout.ty,
                 out_len: out_len2
             }
         );
@@ -970,8 +965,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
 
         // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
-        let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
-        let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
+        let (_, element_ty0) = args[0].layout.ty.simd_size_and_type(bx.tcx());
+        let (_, element_ty1) = args[1].layout.ty.simd_size_and_type(bx.tcx());
         let (pointer_count, underlying_ty) = match *element_ty1.kind() {
             ty::RawPtr(p_ty, _) if p_ty == in_elem => {
                 (ptr_count(element_ty1), non_ptr(element_ty1))
@@ -983,7 +978,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                     span,
                     name,
                     expected_element: element_ty1,
-                    second_arg: arg_tys[1],
+                    second_arg: args[1].layout.ty,
                     in_elem,
                     in_ty,
                     mutability: ExpectedPointerMutability::Not,
@@ -998,7 +993,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
 
         // The element type of the third argument must be an integer type of any width:
         // TODO: also support unsigned integers.
-        let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
+        let (_, element_ty2) = args[2].layout.ty.simd_size_and_type(bx.tcx());
         match *element_ty2.kind() {
             ty::Int(_) => (),
             _ => {
@@ -1030,17 +1025,17 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         // All types must be simd vector types
         require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
         require_simd!(
-            arg_tys[1],
-            InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
+            args[1].layout.ty,
+            InvalidMonomorphization::SimdSecond { span, name, ty: args[1].layout.ty }
        );
         require_simd!(
-            arg_tys[2],
-            InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
+            args[2].layout.ty,
+            InvalidMonomorphization::SimdThird { span, name, ty: args[2].layout.ty }
         );
 
         // Of the same length:
-        let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
-        let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
+        let (element_len1, _) = args[1].layout.ty.simd_size_and_type(bx.tcx());
+        let (element_len2, _) = args[2].layout.ty.simd_size_and_type(bx.tcx());
         require!(
             in_len == element_len1,
             InvalidMonomorphization::SecondArgumentLength {
@@ -1048,7 +1043,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                 name,
                 in_len,
                 in_ty,
-                arg_ty: arg_tys[1],
+                arg_ty: args[1].layout.ty,
                 out_len: element_len1
             }
         );
@@ -1059,7 +1054,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                 name,
                 in_len,
                 in_ty,
-                arg_ty: arg_tys[2],
+                arg_ty: args[2].layout.ty,
                 out_len: element_len2
             }
         );
@@ -1082,9 +1077,9 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
 
         // The second argument must be a simd vector with an element type that's a pointer
         // to the element type of the first argument
-        let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
-        let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
-        let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
+        let (_, element_ty0) = args[0].layout.ty.simd_size_and_type(bx.tcx());
+        let (_, element_ty1) = args[1].layout.ty.simd_size_and_type(bx.tcx());
+        let (_, element_ty2) = args[2].layout.ty.simd_size_and_type(bx.tcx());
         let (pointer_count, underlying_ty) = match *element_ty1.kind() {
             ty::RawPtr(p_ty, mutbl) if p_ty == in_elem && mutbl == hir::Mutability::Mut => {
                 (ptr_count(element_ty1), non_ptr(element_ty1))
@@ -1096,7 +1091,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                     span,
                     name,
                     expected_element: element_ty1,
-                    second_arg: arg_tys[1],
+                    second_arg: args[1].layout.ty,
                     in_elem,
                     in_ty,
                     mutability: ExpectedPointerMutability::Mut,
@@ -1194,8 +1189,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
             return_error!(InvalidMonomorphization::ExpectedVectorElementType {
                 span,
                 name,
-                expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
-                vector_type: arg_tys[0],
+                expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
+                vector_type: args[0].layout.ty,
             });
         }
     };
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index e8629aeebb95a..989752eb78ea4 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -169,19 +169,9 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         span: Span,
     ) -> Result<(), ty::Instance<'tcx>> {
         let tcx = self.tcx;
-        let callee_ty = instance.ty(tcx, self.typing_env());
-        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
-            bug!("expected fn item type, found {}", callee_ty);
-        };
-
-        let sig = callee_ty.fn_sig(tcx);
-        let sig = tcx.normalize_erasing_late_bound_regions(self.typing_env(), sig);
-        let arg_tys = sig.inputs();
-        let ret_ty = sig.output();
-        let name = tcx.item_name(def_id);
-
-        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
+        let name = tcx.item_name(instance.def_id());
+        let fn_args = instance.args;
 
         let simple = get_simple_intrinsic(self, name);
         let llval = match name {
@@ -265,22 +255,22 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             BackendRepr::Scalar(scalar) => {
                 match scalar.primitive() {
                     Primitive::Int(..) => {
-                        if self.cx().size_of(ret_ty).bytes() < 4 {
+                        if self.cx().size_of(result.layout.ty).bytes() < 4 {
                             // `va_arg` should not be called on an integer type
                             // less than 4 bytes in length. If it is, promote
                             // the integer to an `i32` and truncate the result
                             // back to the smaller type.
                             let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
-                            self.trunc(promoted_result, llret_ty)
+                            self.trunc(promoted_result, result.layout.llvm_type(self))
                         } else {
-                            emit_va_arg(self, args[0], ret_ty)
+                            emit_va_arg(self, args[0], result.layout.ty)
                         }
                     }
                     Primitive::Float(Float::F16) => {
                         bug!("the va_arg intrinsic does not work with `f16`")
                     }
                     Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
-                        emit_va_arg(self, args[0], ret_ty)
+                        emit_va_arg(self, args[0], result.layout.ty)
                     }
                     // `va_arg` should never be used with the return type f32.
                     Primitive::Float(Float::F32) => {
@@ -384,7 +374,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             | sym::rotate_right
             | sym::saturating_add
             | sym::saturating_sub => {
-                let ty = arg_tys[0];
+                let ty = args[0].layout.ty;
                 if !ty.is_integral() {
                     tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                         span,
@@ -403,26 +393,26 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                                 &[args[0].immediate(), y],
                             );
 
-                            self.intcast(ret, llret_ty, false)
+                            self.intcast(ret, result.layout.llvm_type(self), false)
                         }
                         sym::ctlz_nonzero => {
                            let y = self.const_bool(true);
                             let llvm_name = &format!("llvm.ctlz.i{width}");
                             let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
-                            self.intcast(ret, llret_ty, false)
+                            self.intcast(ret, result.layout.llvm_type(self), false)
                         }
                         sym::cttz_nonzero => {
                             let y = self.const_bool(true);
                             let llvm_name = &format!("llvm.cttz.i{width}");
                             let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
-                            self.intcast(ret, llret_ty, false)
+                            self.intcast(ret, result.layout.llvm_type(self), false)
                         }
                         sym::ctpop => {
                             let ret = self.call_intrinsic(
                                 &format!("llvm.ctpop.i{width}"),
                                 &[args[0].immediate()],
                             );
-                            self.intcast(ret, llret_ty, false)
+                            self.intcast(ret, result.layout.llvm_type(self), false)
                         }
                         sym::bswap => {
                             if width == 8 {
@@ -554,16 +544,16 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 // Unpack non-power-of-2 #[repr(packed, simd)] arguments.
                 // This gives them the expected layout of a regular #[repr(simd)] vector.
                 let mut loaded_args = Vec::new();
-                for (ty, arg) in arg_tys.iter().zip(args) {
+                for arg in args {
                     loaded_args.push(
                         // #[repr(packed, simd)] vectors are passed like arrays (as references,
                         // with reduced alignment and no padding) rather than as immediates.
                         // We can use a vector load to fix the layout and turn the argument
                         // into an immediate.
-                        if ty.is_simd()
+                        if arg.layout.ty.is_simd()
                             && let OperandValue::Ref(place) = arg.val
                         {
-                            let (size, elem_ty) = ty.simd_size_and_type(self.tcx());
+                            let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx());
                             let elem_ll_ty = match elem_ty.kind() {
                                 ty::Float(f) => self.type_float_from_ty(*f),
                                 ty::Int(i) => self.type_int_from_ty(*i),
@@ -580,10 +570,10 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     );
                 }
 
-                let llret_ty = if ret_ty.is_simd()
-                    && let BackendRepr::Memory { .. } = self.layout_of(ret_ty).layout.backend_repr
+                let llret_ty = if result.layout.ty.is_simd()
+                    && let BackendRepr::Memory { .. } = result.layout.backend_repr
                 {
-                    let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
+                    let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx());
                     let elem_ll_ty = match elem_ty.kind() {
                         ty::Float(f) => self.type_float_from_ty(*f),
                         ty::Int(i) => self.type_int_from_ty(*i),
@@ -593,16 +583,15 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     };
                     self.type_vector(elem_ll_ty, size)
                 } else {
-                    llret_ty
+                    result.layout.llvm_type(self)
                 };
 
                 match generic_simd_intrinsic(
                     self,
                     name,
-                    callee_ty,
                     fn_args,
                     &loaded_args,
-                    ret_ty,
+                    result.layout.ty,
                     llret_ty,
                     span,
                 ) {
@@ -621,9 +610,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         };
 
         if result.layout.ty.is_bool() {
-            OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
-                .val
-                .store(self, result);
+            let val = self.from_immediate(llval);
+            self.store_to_place(val, result.val);
         } else if !result.layout.ty.is_unit() {
             self.store_to_place(llval, result.val);
         }
@@ -1151,7 +1139,6 @@ fn get_rust_try_fn<'a, 'll, 'tcx>(
 fn generic_simd_intrinsic<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     name: Symbol,
-    callee_ty: Ty<'tcx>,
     fn_args: GenericArgsRef<'tcx>,
     args: &[OperandRef<'tcx, &'ll Value>],
     ret_ty: Ty<'tcx>,
@@ -1222,26 +1209,22 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
     }
 
-    let tcx = bx.tcx();
-    let sig = tcx.normalize_erasing_late_bound_regions(bx.typing_env(), callee_ty.fn_sig(tcx));
-    let arg_tys = sig.inputs();
-
     // Sanity-check: all vector arguments must be immediates.
     if cfg!(debug_assertions) {
-        for (ty, arg) in arg_tys.iter().zip(args) {
-            if ty.is_simd() {
+        for arg in args {
+            if arg.layout.ty.is_simd() {
                 assert_matches!(arg.val, OperandValue::Immediate(_));
             }
         }
     }
 
     if name == sym::simd_select_bitmask {
-        let (len, _) = require_simd!(arg_tys[1], SimdArgument);
+        let (len, _) = require_simd!(args[1].layout.ty, SimdArgument);
         let expected_int_bits = len.max(8).next_power_of_two();
         let expected_bytes = len.div_ceil(8);
 
-        let mask_ty = arg_tys[0];
+        let mask_ty = args[0].layout.ty;
         let mask = match mask_ty.kind() {
             ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
             ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
@@ -1275,8 +1258,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     }
 
     // every intrinsic below takes a SIMD vector as its first argument
-    let (in_len, in_elem) = require_simd!(arg_tys[0], SimdInput);
-    let in_ty = arg_tys[0];
+    let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput);
+    let in_ty = args[0].layout.ty;
 
     let comparison = match name {
         sym::simd_eq => Some(BinOp::Eq),
@@ -1407,13 +1390,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
     if name == sym::simd_insert || name == sym::simd_insert_dyn {
         require!(
-            in_elem == arg_tys[2],
+            in_elem == args[2].layout.ty,
             InvalidMonomorphization::InsertedType {
                 span,
                 name,
                 in_elem,
                 in_ty,
-                out_ty: arg_tys[2]
+                out_ty: args[2].layout.ty
             }
         );
@@ -1464,7 +1447,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     if name == sym::simd_select {
         let m_elem_ty = in_elem;
         let m_len = in_len;
-        let (v_len, _) = require_simd!(arg_tys[1], SimdArgument);
+        let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument);
         require!(
             m_len == v_len,
             InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
@@ -1665,9 +1648,9 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         // The second argument must be a simd vector with an element type that's a pointer
         // to the element type of the first argument
         let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
-        let (out_len, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
+        let (out_len, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
         // The element type of the third argument must be a signed integer type of any width:
-        let (out_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
+        let (out_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
         require_simd!(ret_ty, SimdReturn);
 
         // Of the same length:
@@ -1678,7 +1661,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 name,
                 in_len,
                 in_ty,
-                arg_ty: arg_tys[1],
+                arg_ty: args[1].layout.ty,
                 out_len
             }
         );
@@ -1689,7 +1672,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 name,
                 in_len,
                 in_ty,
-                arg_ty: arg_tys[2],
+                arg_ty: args[2].layout.ty,
                 out_len: out_len2
             }
         );
@@ -1709,7 +1692,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                     span,
                     name,
                     expected_element: element_ty1,
-                    second_arg: arg_tys[1],
+                    second_arg: args[1].layout.ty,
                     in_elem,
                     in_ty,
                     mutability: ExpectedPointerMutability::Not,
@@ -1770,10 +1753,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let (mask_len, mask_elem) = (in_len, in_elem);
 
         // The second argument must be a pointer matching the element type
-        let pointer_ty = arg_tys[1];
+        let pointer_ty = args[1].layout.ty;
 
         // The last argument is a passthrough vector providing values for disabled lanes
-        let values_ty = arg_tys[2];
+        let values_ty = args[2].layout.ty;
         let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
 
         require_simd!(ret_ty, SimdReturn);
@@ -1861,10 +1844,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let (mask_len, mask_elem) = (in_len, in_elem);
 
         // The second argument must be a pointer matching the element type
-        let pointer_ty = arg_tys[1];
+        let pointer_ty = args[1].layout.ty;
 
         // The last argument specifies the values to store to memory
-        let values_ty = arg_tys[2];
+        let values_ty = args[2].layout.ty;
         let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
 
         // Of the same length:
@@ -1944,8 +1927,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         // The second argument must be a simd vector with an element type that's a pointer
         // to the element type of the first argument
         let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
-        let (element_len1, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
-        let (element_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
+        let (element_len1, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
+        let (element_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
 
         // Of the same length:
         require!(
@@ -1955,7 +1938,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 name,
                 in_len,
                 in_ty,
-                arg_ty: arg_tys[1],
+                arg_ty: args[1].layout.ty,
                 out_len: element_len1
             }
         );
@@ -1966,7 +1949,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 name,
                 in_len,
                 in_ty,
-                arg_ty: arg_tys[2],
+                arg_ty: args[2].layout.ty,
                 out_len: element_len2
             }
         );
@@ -1981,7 +1964,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 span,
                 name,
                 expected_element: element_ty1,
-                second_arg: arg_tys[1],
+                second_arg: args[1].layout.ty,
                 in_elem,
                 in_ty,
                 mutability: ExpectedPointerMutability::Mut,
@@ -2503,7 +2486,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let ptrs = args[0].immediate();
         // The second argument must be a ptr-sized integer.
         // (We don't care about the signedness, this is wrapping anyway.)
-        let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx());
+        let (_offsets_len, offsets_elem) = args[1].layout.ty.simd_size_and_type(bx.tcx());
         if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
             span_bug!(
                 span,
@@ -2527,8 +2510,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             return_error!(InvalidMonomorphization::ExpectedVectorElementType {
                 span,
                 name,
-                expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
-                vector_type: arg_tys[0]
+                expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
+                vector_type: args[0].layout.ty
             });
         }
     };
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index a79d67bb6cdcc..8c6f52084c2e4 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -1,7 +1,7 @@
 use rustc_abi::WrappingRange;
+use rustc_middle::bug;
 use rustc_middle::mir::SourceInfo;
 use rustc_middle::ty::{self, Ty, TyCtxt};
-use rustc_middle::{bug, span_bug};
 use rustc_session::config::OptLevel;
 use rustc_span::sym;
@@ -60,18 +60,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         source_info: SourceInfo,
     ) -> Result<(), ty::Instance<'tcx>> {
         let span = source_info.span;
-        let callee_ty = instance.ty(bx.tcx(), bx.typing_env());
-        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
-            span_bug!(span, "expected fn item type, found {}", callee_ty);
-        };
-
-        let sig = callee_ty.fn_sig(bx.tcx());
-        let sig = bx.tcx().normalize_erasing_late_bound_regions(bx.typing_env(), sig);
-        let arg_tys = sig.inputs();
-        let ret_ty = sig.output();
-        let name = bx.tcx().item_name(def_id);
+        let name = bx.tcx().item_name(instance.def_id());
         let name_str = name.as_str();
+        let fn_args = instance.args;
 
         // If we're swapping something that's *not* an `OperandValue::Ref`,
         // then we can do it directly and avoid the alloca.
@@ -97,13 +89,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
         }
 
-        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
-
         let ret_llval = |bx: &mut Bx, llval| {
             if result.layout.ty.is_bool() {
-                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
-                    .val
-                    .store(bx, result);
+                let val = bx.from_immediate(llval);
+                bx.store_to_place(val, result.val);
             } else if !result.layout.ty.is_unit() {
                 bx.store_to_place(llval, result.val);
             }
@@ -143,7 +132,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                     _ => bug!(),
                 };
-                let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable, callee_ty);
+                let value = meth::VirtualIndex::from_index(idx).get_usize(
+                    bx,
+                    vtable,
+                    instance.ty(bx.tcx(), bx.typing_env()),
+                );
                 match name {
                     // Size is always <= isize::MAX.
                    sym::vtable_size => {
@@ -164,7 +157,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             | sym::type_name
             | sym::variant_count => {
                 let value = bx.tcx().const_eval_instance(bx.typing_env(), instance, span).unwrap();
-                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
+                OperandRef::from_const(bx, value, result.layout.ty).immediate_or_packed_pair(bx)
             }
             sym::arith_offset => {
                 let ty = fn_args.type_at(0);
@@ -248,7 +241,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 bx.or_disjoint(a, b)
             }
             sym::exact_div => {
-                let ty = arg_tys[0];
+                let ty = args[0].layout.ty;
                 match int_type_width_signed(ty, bx.tcx()) {
                     Some((_width, signed)) => {
                         if signed {
@@ -268,7 +261,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
             sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
-                match float_type_width(arg_tys[0]) {
+                match float_type_width(args[0].layout.ty) {
                     Some(_width) => match name {
                         sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                         sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
@@ -281,7 +274,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                        bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
-                            ty: arg_tys[0],
+                            ty: args[0].layout.ty,
                        });
                        return Ok(());
                    }
@@ -291,7 +284,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             | sym::fsub_algebraic
             | sym::fmul_algebraic
             | sym::fdiv_algebraic
-            | sym::frem_algebraic => match float_type_width(arg_tys[0]) {
+            | sym::frem_algebraic => match float_type_width(args[0].layout.ty) {
                 Some(_width) => match name {
                     sym::fadd_algebraic => {
                         bx.fadd_algebraic(args[0].immediate(), args[1].immediate())
@@ -314,31 +307,32 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType {
                        span,
                        name,
-                        ty: arg_tys[0],
+                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
            },
            sym::float_to_int_unchecked => {
-                if float_type_width(arg_tys[0]).is_none() {
+                if float_type_width(args[0].layout.ty).is_none() {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
-                        ty: arg_tys[0],
+                        ty: args[0].layout.ty,
                    });
                    return Ok(());
                }
-                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
+                let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx())
+                else {
                    bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
-                        ty: ret_ty,
+                        ty: result.layout.ty,
                    });
                    return Ok(());
                };
                if signed {
-                    bx.fptosi(args[0].immediate(), llret_ty)
+                    bx.fptosi(args[0].immediate(), bx.backend_type(result.layout))
                } else {
-                    bx.fptoui(args[0].immediate(), llret_ty)
+                    bx.fptoui(args[0].immediate(), bx.backend_type(result.layout))
                }
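
The recurring change across these hunks is that intrinsic codegen no longer re-derives `arg_tys`/`ret_ty` from the callee's `fn_sig`; it reads the same types from the layouts already attached to each argument operand (`args[i].layout.ty`) and to the destination place (`result.layout`), falling back to `instance.ty(...)` only where the full callee type is genuinely needed (the `vtable_size`/`vtable_align` path). Below is a minimal, self-contained sketch of that data flow; `Operand`, `Place`, and `Layout` are hypothetical stand-ins for rustc's `OperandRef`/`PlaceRef` and layout types, not the real APIs.

```rust
// Hypothetical stand-ins: each value carries its layout, and the layout
// carries the monomorphized type, so no separate type slice is needed.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Ty {
    I32,
    F64,
}

#[derive(Clone, Copy)]
struct Layout {
    ty: Ty,
}

#[derive(Clone, Copy)]
struct Operand {
    layout: Layout,
}

#[derive(Clone, Copy)]
struct Place {
    layout: Layout,
}

// Before the patch, callers threaded `arg_tys`/`ret_ty` through separately;
// after it, the same information is read off the operands and the result
// place, as modeled here.
fn check_intrinsic(args: &[Operand], result: Place) -> Result<(), String> {
    // Analogous to the `exact_div`/`ctlz` checks on the first argument type.
    if args[0].layout.ty != Ty::I32 {
        return Err(format!("expected basic integer type, found {:?}", args[0].layout.ty));
    }
    // Analogous to the `float_to_int_unchecked` check on the return type.
    if result.layout.ty != Ty::I32 {
        return Err(format!("expected integer return type, found {:?}", result.layout.ty));
    }
    Ok(())
}

fn main() {
    let ok_args = [Operand { layout: Layout { ty: Ty::I32 } }];
    let result = Place { layout: Layout { ty: Ty::I32 } };
    assert!(check_intrinsic(&ok_args, result).is_ok());

    let bad_args = [Operand { layout: Layout { ty: Ty::F64 } }];
    assert!(check_intrinsic(&bad_args, result).is_err());
}
```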