Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
Show all changes
41 commits
Select commit Hold shift + click to select a range
deb901c
Use closures more consistently in `dep_graph.rs`.
nnethercote Mar 17, 2026
39f7cdb
update thin-vec
malezjaa Apr 8, 2026
b009e5f
remove try-normalize-use-tree
Shourya742 Apr 9, 2026
df98ac2
Fix rustfmt relative custom command
erfanio Apr 10, 2026
dacfa3c
fix: no complete term expressions on qualified path
A4-Tacks Apr 10, 2026
f1f6d56
fix: no imports on type anchor qualified path
A4-Tacks Apr 10, 2026
65bae63
Use create-github-app-token to get token for gen-lints
lnicola Apr 10, 2026
32c1e17
Merge pull request #22011 from lnicola/gen-lints-token
lnicola Apr 10, 2026
a816b21
Merge pull request #22002 from Shourya742/2026-04-09-remove-try-norma…
ChayimFriedman2 Apr 10, 2026
247651d
Bump create-github-app-token
lnicola Apr 10, 2026
9c7ad1a
Merge pull request #22013 from lnicola/bump-create-github-app-token
lnicola Apr 10, 2026
cdcd9b4
internal: update generated lints
workflows-rust-analyzer[bot] Apr 10, 2026
2629a77
Use last good clippy lints JSON in codegen
lnicola Apr 10, 2026
79758ac
Merge pull request #21998 from rust-lang/ci/gen-lints
lnicola Apr 10, 2026
198e4c4
Merge pull request #22010 from erfanio/rustfmt-fix
Veykril Apr 11, 2026
f9b6718
add regression test for OpenOptionsExt downstream compat
Vastargazing Apr 11, 2026
671b660
Parse `cfg_attr` and `cfg` specially
ChayimFriedman2 Apr 6, 2026
1b81c1d
Merge pull request #21965 from ChayimFriedman2/refactor-cfg-attr
ChayimFriedman2 Apr 11, 2026
fc2c72c
Make the expansion of guard metavars begin guard non-terminals
fmease Apr 12, 2026
8865266
Merge pull request #22012 from A4-Tacks/no-import-on-type-anchor-qual…
Veykril Apr 12, 2026
c0ad604
Merge pull request #22009 from A4-Tacks/no-expr-in-qualified
Veykril Apr 12, 2026
25a92d2
Add #![unstable_removed(..)] attribute to track removed features
Ozzy1423 Mar 3, 2026
ca6a851
cg_llvm: replace `sve_cast` with `simd_cast`
davidtwco Apr 10, 2026
62ffc89
cg_llvm: scalable vectors with `simd_select`
davidtwco Apr 10, 2026
791a3dc
Revert "Fix cycles during delayed lowering"
aerooneqq Apr 13, 2026
d283703
Add tests for ICEs when hir_crate_items executed before delayed lowering
aerooneqq Apr 13, 2026
51888a1
Support proper interaction of user-specified args and impl Traits
aerooneqq Apr 13, 2026
eb11900
add regression test for RTN assoc type restriction ICE
TaKO8Ki Apr 7, 2026
dbf8681
handle RTN projections in assoc type restriction diagnostics
TaKO8Ki Apr 7, 2026
205dd6f
reduce ICE reproducer
TaKO8Ki Apr 8, 2026
9339abb
move tests to associated-type-bounds/return-type-notation
TaKO8Ki Apr 13, 2026
23c6bc9
Rollup merge of #155227 - lnicola:sync-from-ra, r=lnicola
JonathanBrouwer Apr 13, 2026
d4037f7
Rollup merge of #153335 - Ozzy1423:removed-features, r=jdonszelmann
JonathanBrouwer Apr 13, 2026
7370657
Rollup merge of #154932 - TaKO8Ki:fix-152887-rtn-assoc-type-name, r=j…
JonathanBrouwer Apr 13, 2026
ab551b1
Rollup merge of #155096 - aerooneqq:delegation-user-specified-args-im…
JonathanBrouwer Apr 13, 2026
83a57ca
Rollup merge of #155106 - davidtwco:scalable-vector-more-simd-intrins…
JonathanBrouwer Apr 13, 2026
49f0536
Rollup merge of #155140 - Vastargazing:open-options-ext-test, r=jdons…
JonathanBrouwer Apr 13, 2026
49b5708
Rollup merge of #155182 - fmease:guard-exp-begins-guard, r=petrochenkov
JonathanBrouwer Apr 13, 2026
14b5df4
Rollup merge of #155226 - aerooneqq:delegation-hir-crate-items-revert…
JonathanBrouwer Apr 13, 2026
2f607ee
Rollup merge of #153997 - nnethercote:closure-consistency, r=petroche…
JonathanBrouwer Apr 13, 2026
f8a8c9e
Rollup merge of #155003 - malezjaa:update-thinvec, r=davidtwco
JonathanBrouwer Apr 13, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions compiler/rustc_abi/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,8 @@ use bitflags::bitflags;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
#[cfg(feature = "nightly")]
use rustc_error_messages::{DiagArgValue, IntoDiagArg};
#[cfg(feature = "nightly")]
use rustc_errors::{Diag, DiagCtxtHandle, Diagnostic, EmissionGuarantee, Level, msg};
use rustc_hashes::Hash64;
use rustc_index::{Idx, IndexSlice, IndexVec};
Expand Down Expand Up @@ -1775,6 +1777,24 @@ impl NumScalableVectors {
}
}

#[cfg(feature = "nightly")]
impl IntoDiagArg for NumScalableVectors {
    /// Renders the vector count as an English word ("one" … "eight") for use
    /// in diagnostics. Counts outside 1..=8 are ill-formed and panic.
    fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue {
        // Spelled-out names for the valid counts, indexed by `count - 1`.
        const WORDS: [&str; 8] = ["one", "two", "three", "four", "five", "six", "seven", "eight"];
        let word = match self.0 {
            0 => panic!("`NumScalableVectors(0)` is illformed"),
            n @ 1..=8 => WORDS[(n - 1) as usize],
            _ => panic!("`NumScalableVectors(N)` for N>8 is illformed"),
        };
        DiagArgValue::Str(std::borrow::Cow::Borrowed(word))
    }
}

/// The way we represent values to the backend
///
/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
Expand Down
236 changes: 125 additions & 111 deletions compiler/rustc_codegen_llvm/src/intrinsic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -606,27 +606,6 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
self.pointercast(val, self.type_ptr())
}

sym::sve_cast => {
let Some((in_cnt, in_elem, in_num_vecs)) =
args[0].layout.ty.scalable_vector_parts(self.cx.tcx)
else {
bug!("input parameter to `sve_cast` was not scalable vector");
};
let out_layout = self.layout_of(fn_args.type_at(1));
let Some((out_cnt, out_elem, out_num_vecs)) =
out_layout.ty.scalable_vector_parts(self.cx.tcx)
else {
bug!("output parameter to `sve_cast` was not scalable vector");
};
assert_eq!(in_cnt, out_cnt);
assert_eq!(in_num_vecs, out_num_vecs);
let out_llty = self.backend_type(out_layout);
match simd_cast(self, sym::simd_cast, args, out_llty, in_elem, out_elem) {
Some(val) => val,
_ => bug!("could not cast scalable vectors"),
}
}

sym::sve_tuple_create2 => {
assert_matches!(
self.layout_of(fn_args.type_at(0)).backend_repr,
Expand Down Expand Up @@ -1668,6 +1647,23 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}};
}

/// Like `require_simd!`, but also accepts scalable vectors. Yields a triple
/// `(length, element_ty, Option<num_vecs>)` where the third component is
/// `Some(..)` exactly when the type is a scalable vector.
macro_rules! require_simd_or_scalable {
    ($ty: expr, $variant:ident) => {{
        require!(
            $ty.is_simd() || $ty.is_scalable_vector(),
            InvalidMonomorphization::$variant { span, name, ty: $ty }
        );
        if !$ty.is_simd() {
            // Scalable vector: recover its (count, element, vector-count) parts.
            let (count, elem_ty, num_vecs) =
                $ty.scalable_vector_parts(bx.tcx()).expect("`is_scalable_vector` was wrong");
            (count as u64, elem_ty, Some(num_vecs))
        } else {
            // Ordinary fixed-length SIMD vector: no `num_vecs` component.
            let (len, elem_ty) = $ty.simd_size_and_type(bx.tcx());
            (len, elem_ty, None)
        }
    }};
}

/// Returns the bitwidth of the `$ty` argument if it is an `Int` or `Uint` type.
macro_rules! require_int_or_uint_ty {
($ty: expr, $diag: expr) => {
Expand Down Expand Up @@ -1787,8 +1783,19 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
return Ok(splat);
}

// every intrinsic below takes a SIMD vector as its first argument
let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput);
let supports_scalable = match name {
sym::simd_cast | sym::simd_select => true,
_ => false,
};

// Every intrinsic below takes a SIMD vector as its first argument. Some intrinsics also accept
// scalable vectors. `require_simd_or_scalable` is used regardless as it'll do the right thing
// for non-scalable vectors, and an additional check to prohibit scalable vectors for those
// intrinsics that do not support them is added.
if !supports_scalable {
let _ = require_simd!(args[0].layout.ty, SimdInput);
}
let (in_len, in_elem, in_num_vecs) = require_simd_or_scalable!(args[0].layout.ty, SimdInput);
let in_ty = args[0].layout.ty;

let comparison = match name {
Expand Down Expand Up @@ -1977,7 +1984,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
if name == sym::simd_select {
let m_elem_ty = in_elem;
let m_len = in_len;
let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument);
let (v_len, _, _) = require_simd_or_scalable!(args[1].layout.ty, SimdArgument);
require!(
m_len == v_len,
InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
Expand Down Expand Up @@ -2781,7 +2788,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}

if name == sym::simd_cast || name == sym::simd_as {
let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
let (out_len, out_elem, out_num_vecs) = require_simd_or_scalable!(ret_ty, SimdReturn);
require!(
in_len == out_len,
InvalidMonomorphization::ReturnLengthInputType {
Expand All @@ -2793,9 +2800,99 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
out_len
}
);
match simd_cast(bx, name, args, llret_ty, in_elem, out_elem) {
Some(val) => return Ok(val),
None => return_error!(InvalidMonomorphization::UnsupportedCast {
require!(
in_num_vecs == out_num_vecs,
InvalidMonomorphization::ReturnNumVecsInputType {
span,
name,
in_num_vecs: in_num_vecs.unwrap_or(NumScalableVectors(1)),
in_ty,
ret_ty,
out_num_vecs: out_num_vecs.unwrap_or(NumScalableVectors(1))
}
);

// Casting cares about nominal type, not just structural type
if in_elem == out_elem {
return Ok(args[0].immediate());
}

#[derive(Copy, Clone)]
enum Sign {
Unsigned,
Signed,
}
use Sign::*;

enum Style {
Float,
Int(Sign),
Unsupported,
}

let (in_style, in_width) = match in_elem.kind() {
// vectors of pointer-sized integers should've been
// disallowed before here, so this unwrap is safe.
ty::Int(i) => (
Style::Int(Signed),
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
),
ty::Uint(u) => (
Style::Int(Unsigned),
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
),
ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0),
};
let (out_style, out_width) = match out_elem.kind() {
ty::Int(i) => (
Style::Int(Signed),
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
),
ty::Uint(u) => (
Style::Int(Unsigned),
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
),
ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0),
};

match (in_style, out_style) {
(Style::Int(sign), Style::Int(_)) => {
return Ok(match in_width.cmp(&out_width) {
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => match sign {
Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
},
});
}
(Style::Int(Sign::Signed), Style::Float) => {
return Ok(bx.sitofp(args[0].immediate(), llret_ty));
}
(Style::Int(Sign::Unsigned), Style::Float) => {
return Ok(bx.uitofp(args[0].immediate(), llret_ty));
}
(Style::Float, Style::Int(sign)) => {
return Ok(match (sign, name == sym::simd_as) {
(Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
(Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
(_, true) => bx.cast_float_to_int(
matches!(sign, Sign::Signed),
args[0].immediate(),
llret_ty,
),
});
}
(Style::Float, Style::Float) => {
return Ok(match in_width.cmp(&out_width) {
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
});
}
_ => return_error!(InvalidMonomorphization::UnsupportedCast {
span,
name,
in_ty,
Expand Down Expand Up @@ -2977,86 +3074,3 @@ fn generic_simd_intrinsic<'ll, 'tcx>(

span_bug!(span, "unknown SIMD intrinsic");
}

/// Implementation of `core::intrinsics::simd_cast`, re-used by `core::scalable::sve_cast`.
///
/// Returns `None` when either element type is neither an integer nor a float;
/// the caller reports the unsupported cast.
fn simd_cast<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    name: Symbol,
    args: &[OperandRef<'tcx, &'ll Value>],
    llret_ty: &'ll Type,
    in_elem: Ty<'tcx>,
    out_elem: Ty<'tcx>,
) -> Option<&'ll Value> {
    // Casting cares about nominal type, not just structural type: identical
    // element types make the cast a no-op.
    if in_elem == out_elem {
        return Some(args[0].immediate());
    }

    #[derive(Copy, Clone)]
    enum Sign {
        Unsigned,
        Signed,
    }

    #[derive(Copy, Clone)]
    enum Style {
        Float,
        Int(Sign),
        Unsupported,
    }

    // Classify one element type into (style, bit width). Pointer-sized integers
    // are normalized to the target's concrete width; vectors of pointer-sized
    // integers should've been disallowed before here, so the unwrap is safe.
    let ptr_width = bx.tcx().sess.target.pointer_width;
    let classify = |elem: Ty<'tcx>| match elem.kind() {
        ty::Int(i) => (Style::Int(Sign::Signed), i.normalize(ptr_width).bit_width().unwrap()),
        ty::Uint(u) => (Style::Int(Sign::Unsigned), u.normalize(ptr_width).bit_width().unwrap()),
        ty::Float(f) => (Style::Float, f.bit_width()),
        _ => (Style::Unsupported, 0),
    };
    let (in_style, in_width) = classify(in_elem);
    let (out_style, out_width) = classify(out_elem);

    match (in_style, out_style) {
        // int -> int: truncate, keep as-is, or sign/zero-extend by width.
        (Style::Int(sign), Style::Int(_)) => Some(match in_width.cmp(&out_width) {
            Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
            Ordering::Equal => args[0].immediate(),
            Ordering::Less => match sign {
                Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
                Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
            },
        }),
        // int -> float: signedness picks the conversion instruction.
        (Style::Int(Sign::Signed), Style::Float) => Some(bx.sitofp(args[0].immediate(), llret_ty)),
        (Style::Int(Sign::Unsigned), Style::Float) => {
            Some(bx.uitofp(args[0].immediate(), llret_ty))
        }
        // float -> int: plain casts use fptoui/fptosi; `simd_as` lowers through
        // `cast_float_to_int` instead.
        (Style::Float, Style::Int(sign)) => Some(match (sign, name == sym::simd_as) {
            (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
            (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
            (_, true) => {
                bx.cast_float_to_int(matches!(sign, Sign::Signed), args[0].immediate(), llret_ty)
            }
        }),
        // float -> float: truncate, keep as-is, or extend by width.
        (Style::Float, Style::Float) => Some(match in_width.cmp(&out_width) {
            Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
            Ordering::Equal => args[0].immediate(),
            Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
        }),
        _ => None,
    }
}
12 changes: 12 additions & 0 deletions compiler/rustc_codegen_ssa/src/errors.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ use std::io::Error;
use std::path::{Path, PathBuf};
use std::process::ExitStatus;

use rustc_abi::NumScalableVectors;
use rustc_errors::codes::*;
use rustc_errors::{
Diag, DiagArgValue, DiagCtxtHandle, DiagSymbolList, Diagnostic, EmissionGuarantee, IntoDiagArg,
Expand Down Expand Up @@ -809,6 +810,17 @@ pub enum InvalidMonomorphization<'tcx> {
out_len: u64,
},

#[diag("invalid monomorphization of `{$name}` intrinsic: expected return type with {$in_num_vecs} vectors (same as input type `{$in_ty}`), found `{$ret_ty}` with length {$out_num_vecs}", code = E0511)]
ReturnNumVecsInputType {
#[primary_span]
span: Span,
name: Symbol,
in_num_vecs: NumScalableVectors,
in_ty: Ty<'tcx>,
ret_ty: Ty<'tcx>,
out_num_vecs: NumScalableVectors,
},

#[diag("invalid monomorphization of `{$name}` intrinsic: expected second argument with length {$in_len} (same as input type `{$in_ty}`), found `{$arg_ty}` with length {$out_len}", code = E0511)]
SecondArgumentLength {
#[primary_span]
Expand Down
21 changes: 0 additions & 21 deletions library/core/src/intrinsics/simd/scalable.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,27 +2,6 @@
//!
//! In this module, a "vector" is any `#[rustc_scalable_vector]`-annotated type.

/// Numerically casts a vector, elementwise.
///
/// `T` and `U` must be vectors of integers or floats, and must have the same length.
///
/// When casting floats to integers, the result is truncated. Out-of-bounds results lead to UB.
/// When casting integers to floats, the result is rounded.
/// Otherwise, truncates or extends the value, maintaining the sign for signed integers.
///
/// # Safety
/// Casting from integer types is always safe.
/// Casting between two float types is also always safe.
///
/// Casting floats to integers truncates, following the same rules as `to_int_unchecked`.
/// Specifically, each element must:
/// * Not be `NaN`
/// * Not be infinite
/// * Be representable in the return type, after truncating off its fractional part
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn sve_cast<T, U>(x: T) -> U;

/// Create a tuple of two vectors.
///
/// `SVecTup` must be a scalable vector tuple (`#[rustc_scalable_vector]`) and `SVec` must be a
Expand Down
4 changes: 2 additions & 2 deletions tests/ui/scalable-vectors/cast-intrinsic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
#![allow(incomplete_features, internal_features, improper_ctypes)]
#![feature(abi_unadjusted, core_intrinsics, link_llvm_intrinsics, rustc_attrs)]

use std::intrinsics::simd::scalable::sve_cast;
use std::intrinsics::simd::simd_cast;

#[derive(Copy, Clone)]
#[rustc_scalable_vector(16)]
Expand Down Expand Up @@ -61,5 +61,5 @@ pub unsafe fn svld1sh_gather_s64offset_s64(
offsets: svint64_t,
) -> nxv2i16;
}
sve_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets))
simd_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets))
}
Loading