From 65a434a49196120c413ab350098bbcb5d68cf213 Mon Sep 17 00:00:00 2001 From: Linwei Shang Date: Thu, 5 Sep 2024 14:18:00 -0400 Subject: [PATCH] Implement the table64 extension to the memory64 proposal This commit implements the table64 extension in both Wasmtime and Cranelift. Most of the work was changing a bunch of u32 values to u64/usize. The decisions were made in line with PR #3153, which implemented the memory64 proposal itself. One significant change was the introduction of `IndexType` and `Limits`, which streamline and unify the handling of limits for both memories and tables. The spec and fuzzing tests related to table64 are re-enabled, which provides good coverage of the feature. --- cranelift/wasm/src/environ/dummy.rs | 2 +- cranelift/wasm/src/environ/spec.rs | 2 +- cranelift/wasm/src/sections_translator.rs | 15 +- cranelift/wasm/src/table.rs | 30 +- crates/c-api/include/wasmtime/table.h | 10 +- crates/c-api/src/store.rs | 2 +- crates/c-api/src/table.rs | 22 +- crates/c-api/src/types/table.rs | 4 +- crates/cli-flags/src/lib.rs | 4 +- crates/cranelift/src/func_environ.rs | 241 ++++++++------- crates/environ/src/builtin.rs | 14 +- crates/environ/src/compile/module_environ.rs | 30 +- .../environ/src/component/translate/inline.rs | 11 +- crates/environ/src/module.rs | 6 +- crates/environ/src/vmoffsets.rs | 2 +- crates/fuzzing/src/generators/config.rs | 5 - .../fuzzing/src/generators/pooling_config.rs | 4 +- crates/fuzzing/src/oracles.rs | 9 +- crates/types/src/error.rs | 13 +- crates/types/src/lib.rs | 93 ++++-- crates/wasmtime/src/config.rs | 2 +- .../wasmtime/src/runtime/externals/table.rs | 36 ++- crates/wasmtime/src/runtime/limits.rs | 24 +- crates/wasmtime/src/runtime/linker.rs | 2 +- crates/wasmtime/src/runtime/module.rs | 4 +- crates/wasmtime/src/runtime/resources.rs | 2 +- crates/wasmtime/src/runtime/store.rs | 6 +- .../wasmtime/src/runtime/trampoline/table.rs | 4 +- crates/wasmtime/src/runtime/types.rs | 92 ++++-- 
crates/wasmtime/src/runtime/types/matching.rs | 57 ++-- crates/wasmtime/src/runtime/vm.rs | 6 +- crates/wasmtime/src/runtime/vm/cow.rs | 7 +- crates/wasmtime/src/runtime/vm/instance.rs | 33 +- .../src/runtime/vm/instance/allocator.rs | 31 +- .../runtime/vm/instance/allocator/pooling.rs | 2 +- .../instance/allocator/pooling/table_pool.rs | 11 +- crates/wasmtime/src/runtime/vm/libcalls.rs | 48 +-- crates/wasmtime/src/runtime/vm/memory.rs | 4 +- crates/wasmtime/src/runtime/vm/table.rs | 178 ++++++----- crates/wasmtime/src/runtime/vm/vmcontext.rs | 2 +- crates/wast/src/spectest.rs | 4 + crates/wast/src/wast.rs | 1 + tests/all/cli_tests.rs | 2 +- tests/all/externals.rs | 3 +- tests/all/limits.rs | 62 ++-- tests/all/memory.rs | 6 +- tests/all/pooling_allocator.rs | 10 +- tests/disas/icall-loop.wat | 80 ++--- tests/disas/icall-simd.wat | 27 +- tests/disas/icall.wat | 27 +- tests/disas/indirect-call-no-caching.wat | 27 +- tests/disas/readonly-funcrefs.wat | 30 +- tests/disas/table-copy.wat | 26 +- tests/disas/table-get.wat | 250 ++++++++-------- tests/disas/table-set.wat | 282 +++++++++--------- tests/disas/typed-funcrefs.wat | 108 +++---- .../winch/x64/call_indirect/call_indirect.wat | 60 ++-- .../winch/x64/call_indirect/local_arg.wat | 30 +- tests/disas/winch/x64/table/fill.wat | 22 +- tests/disas/winch/x64/table/get.wat | 22 +- .../disas/winch/x64/table/init_copy_drop.wat | 48 +-- tests/disas/winch/x64/table/set.wat | 44 +-- tests/disas/winch/x64/table/size.wat | 2 +- tests/rlimited-memory.rs | 6 +- tests/wast.rs | 11 - winch/codegen/src/codegen/env.rs | 7 +- winch/codegen/src/visitor.rs | 8 +- 67 files changed, 1252 insertions(+), 1023 deletions(-) diff --git a/cranelift/wasm/src/environ/dummy.rs b/cranelift/wasm/src/environ/dummy.rs index cb4b09bcec44..2c7d1f5f3489 100644 --- a/cranelift/wasm/src/environ/dummy.rs +++ b/cranelift/wasm/src/environ/dummy.rs @@ -761,7 +761,7 @@ impl<'data> ModuleEnvironment<'data> for DummyEnvironment { &mut self, _table_index: 
TableIndex, _base: Option, - _offset: u32, + _offset: u64, _elements: Box<[FuncIndex]>, ) -> WasmResult<()> { // We do nothing diff --git a/cranelift/wasm/src/environ/spec.rs b/cranelift/wasm/src/environ/spec.rs index 61c272dbcf7b..a673894e1046 100644 --- a/cranelift/wasm/src/environ/spec.rs +++ b/cranelift/wasm/src/environ/spec.rs @@ -833,7 +833,7 @@ pub trait ModuleEnvironment<'data>: TypeConvert { &mut self, table_index: TableIndex, base: Option, - offset: u32, + offset: u64, elements: Box<[FuncIndex]>, ) -> WasmResult<()>; diff --git a/cranelift/wasm/src/sections_translator.rs b/cranelift/wasm/src/sections_translator.rs index e033f13db0be..8943dab70b1b 100644 --- a/cranelift/wasm/src/sections_translator.rs +++ b/cranelift/wasm/src/sections_translator.rs @@ -14,7 +14,7 @@ use crate::{ TypeIndex, WasmError, WasmResult, }; use cranelift_entity::packed_option::ReservedValue; -use cranelift_entity::EntityRef; +use cranelift_entity::{EntityRef, Unsigned}; use std::boxed::Box; use std::vec::Vec; use wasmparser::{ @@ -185,7 +185,7 @@ pub fn parse_export_section<'data>( // The input has already been validated, so we should be able to // assume valid UTF-8 and use `from_utf8_unchecked` if performance // becomes a concern here. - let index = index as usize; + let index = usize::try_from(index)?; match *kind { ExternalKind::Func => environ.declare_func_export(FuncIndex::new(index), name)?, ExternalKind::Table => environ.declare_table_export(TableIndex::new(index), name)?, @@ -252,7 +252,8 @@ pub fn parse_element_section<'data>( } => { let mut offset_expr_reader = offset_expr.get_binary_reader(); let (base, offset) = match offset_expr_reader.read_operator()? 
{ - Operator::I32Const { value } => (None, value as u32), + Operator::I32Const { value } => (None, u64::from(value.unsigned())), + Operator::I64Const { value } => (None, value.unsigned()), Operator::GlobalGet { global_index } => { (Some(GlobalIndex::from_u32(global_index)), 0) } @@ -271,7 +272,7 @@ pub fn parse_element_section<'data>( )? } ElementKind::Passive => { - let index = ElemIndex::from_u32(index as u32); + let index = ElemIndex::from_u32(u32::try_from(index)?); environ.declare_passive_element(index, segments)?; } ElementKind::Declared => { @@ -302,8 +303,8 @@ pub fn parse_data_section<'data>( } => { let mut offset_expr_reader = offset_expr.get_binary_reader(); let (base, offset) = match offset_expr_reader.read_operator()? { - Operator::I32Const { value } => (None, value as u64), - Operator::I64Const { value } => (None, value as u64), + Operator::I32Const { value } => (None, u64::try_from(value)?), + Operator::I64Const { value } => (None, u64::try_from(value)?), Operator::GlobalGet { global_index } => { (Some(GlobalIndex::from_u32(global_index)), 0) } @@ -322,7 +323,7 @@ pub fn parse_data_section<'data>( )?; } DataKind::Passive => { - let index = DataIndex::from_u32(index as u32); + let index = DataIndex::from_u32(u32::try_from(index)?); environ.declare_passive_data(index, data)?; } } diff --git a/cranelift/wasm/src/table.rs b/cranelift/wasm/src/table.rs index bb5466670696..64d92b1d2e78 100644 --- a/cranelift/wasm/src/table.rs +++ b/cranelift/wasm/src/table.rs @@ -1,5 +1,6 @@ use cranelift_codegen::cursor::FuncCursor; use cranelift_codegen::ir::{self, condcodes::IntCC, immediates::Imm64, InstBuilder}; +use cranelift_codegen::isa::TargetIsa; use cranelift_frontend::FunctionBuilder; /// Size of a WebAssembly table, in elements. @@ -8,7 +9,7 @@ pub enum TableSize { /// Non-resizable table. Static { /// Non-resizable tables have a constant size known at compile time. - bound: u32, + bound: u64, }, /// Resizable table. 
Dynamic { @@ -20,10 +21,21 @@ pub enum TableSize { impl TableSize { /// Get a CLIF value representing the current bounds of this table. - pub fn bound(&self, mut pos: FuncCursor, index_ty: ir::Type) -> ir::Value { + pub fn bound(&self, isa: &dyn TargetIsa, mut pos: FuncCursor, index_ty: ir::Type) -> ir::Value { match *self { - TableSize::Static { bound } => pos.ins().iconst(index_ty, Imm64::new(i64::from(bound))), - TableSize::Dynamic { bound_gv } => pos.ins().global_value(index_ty, bound_gv), + // Instead of `i64::try_from(bound)`, here we just want to direcly interpret `bound` as an i64. + TableSize::Static { bound } => pos.ins().iconst(index_ty, Imm64::new(bound as i64)), + TableSize::Dynamic { bound_gv } => { + let ty = pos.func.global_values[bound_gv].global_type(isa); + let gv = pos.ins().global_value(ty, bound_gv); + if index_ty == ty { + gv + } else if index_ty.bytes() < ty.bytes() { + pos.ins().ireduce(index_ty, gv) + } else { + pos.ins().uextend(index_ty, gv) + } + } } } } @@ -46,22 +58,22 @@ impl TableData { /// given index within this table. pub fn prepare_table_addr( &self, + isa: &dyn TargetIsa, pos: &mut FunctionBuilder, mut index: ir::Value, - addr_ty: ir::Type, - enable_table_access_spectre_mitigation: bool, ) -> (ir::Value, ir::MemFlags) { let index_ty = pos.func.dfg.value_type(index); + let addr_ty = isa.pointer_type(); // Start with the bounds check. Trap if `index + 1 > bound`. - let bound = self.bound.bound(pos.cursor(), index_ty); + let bound = self.bound.bound(isa, pos.cursor(), index_ty); // `index > bound - 1` is the same as `index >= bound`. 
let oob = pos .ins() .icmp(IntCC::UnsignedGreaterThanOrEqual, index, bound); - if !enable_table_access_spectre_mitigation { + if !isa.flags().enable_table_access_spectre_mitigation() { pos.ins().trapnz(oob, ir::TrapCode::TableOutOfBounds); } @@ -88,7 +100,7 @@ impl TableData { let base_flags = ir::MemFlags::new() .with_aligned() .with_alias_region(Some(ir::AliasRegion::Table)); - if enable_table_access_spectre_mitigation { + if isa.flags().enable_table_access_spectre_mitigation() { // Short-circuit the computed table element address to a null pointer // when out-of-bounds. The consumer of this address will trap when // trying to access it. diff --git a/crates/c-api/include/wasmtime/table.h b/crates/c-api/include/wasmtime/table.h index 9ec8f3796c80..7a648691ca2a 100644 --- a/crates/c-api/include/wasmtime/table.h +++ b/crates/c-api/include/wasmtime/table.h @@ -57,7 +57,7 @@ wasmtime_table_type(const wasmtime_context_t *store, */ WASM_API_EXTERN bool wasmtime_table_get(wasmtime_context_t *store, const wasmtime_table_t *table, - uint32_t index, wasmtime_val_t *val); + uint64_t index, wasmtime_val_t *val); /** * \brief Sets a value in a table. 
@@ -74,12 +74,12 @@ WASM_API_EXTERN bool wasmtime_table_get(wasmtime_context_t *store, */ WASM_API_EXTERN wasmtime_error_t * wasmtime_table_set(wasmtime_context_t *store, const wasmtime_table_t *table, - uint32_t index, const wasmtime_val_t *value); + uint64_t index, const wasmtime_val_t *value); /** * \brief Returns the size, in elements, of the specified table */ -WASM_API_EXTERN uint32_t wasmtime_table_size(const wasmtime_context_t *store, +WASM_API_EXTERN uint64_t wasmtime_table_size(const wasmtime_context_t *store, const wasmtime_table_t *table); /** @@ -101,8 +101,8 @@ WASM_API_EXTERN uint32_t wasmtime_table_size(const wasmtime_context_t *store, */ WASM_API_EXTERN wasmtime_error_t * wasmtime_table_grow(wasmtime_context_t *store, const wasmtime_table_t *table, - uint32_t delta, const wasmtime_val_t *init, - uint32_t *prev_size); + uint64_t delta, const wasmtime_val_t *init, + uint64_t *prev_size); #ifdef __cplusplus } // extern "C" diff --git a/crates/c-api/src/store.rs b/crates/c-api/src/store.rs index bffe3c9a3463..90a7397db786 100644 --- a/crates/c-api/src/store.rs +++ b/crates/c-api/src/store.rs @@ -180,7 +180,7 @@ pub extern "C" fn wasmtime_store_limiter( limiter = limiter.memory_size(memory_size as usize); } if table_elements >= 0 { - limiter = limiter.table_elements(table_elements as u32); + limiter = limiter.table_elements(table_elements as usize); } if instances >= 0 { limiter = limiter.instances(instances as usize); diff --git a/crates/c-api/src/table.rs b/crates/c-api/src/table.rs index d7ff324580b8..63f451c14512 100644 --- a/crates/c-api/src/table.rs +++ b/crates/c-api/src/table.rs @@ -67,7 +67,7 @@ pub unsafe extern "C" fn wasm_table_get( index: wasm_table_size_t, ) -> Option> { let table = t.table(); - let r = table.get(t.ext.store.context_mut(), index)?; + let r = table.get(t.ext.store.context_mut(), u64::from(index))?; wasm_ref_t::new(r) } @@ -79,14 +79,16 @@ pub unsafe extern "C" fn wasm_table_set( ) -> bool { let table = t.table(); let val = 
option_wasm_ref_t_to_ref(r, &table.ty(t.ext.store.context())); - table.set(t.ext.store.context_mut(), index, val).is_ok() + table + .set(t.ext.store.context_mut(), u64::from(index), val) + .is_ok() } #[no_mangle] pub unsafe extern "C" fn wasm_table_size(t: &wasm_table_t) -> wasm_table_size_t { let table = t.table(); let store = t.ext.store.context(); - table.size(&store) + u32::try_from(table.size(&store)).unwrap() } #[no_mangle] @@ -97,7 +99,9 @@ pub unsafe extern "C" fn wasm_table_grow( ) -> bool { let table = t.table(); let init = option_wasm_ref_t_to_ref(init, &table.ty(t.ext.store.context())); - table.grow(t.ext.store.context_mut(), delta, init).is_ok() + table + .grow(t.ext.store.context_mut(), u64::from(delta), init) + .is_ok() } #[no_mangle] @@ -139,7 +143,7 @@ pub unsafe extern "C" fn wasmtime_table_type( pub extern "C" fn wasmtime_table_get( store: WasmtimeStoreContextMut<'_>, table: &Table, - index: u32, + index: u64, ret: &mut MaybeUninit, ) -> bool { let mut scope = RootScope::new(store); @@ -156,7 +160,7 @@ pub extern "C" fn wasmtime_table_get( pub unsafe extern "C" fn wasmtime_table_set( mut store: WasmtimeStoreContextMut<'_>, table: &Table, - index: u32, + index: u64, val: &wasmtime_val_t, ) -> Option> { let mut scope = RootScope::new(&mut store); @@ -170,7 +174,7 @@ pub unsafe extern "C" fn wasmtime_table_set( } #[no_mangle] -pub extern "C" fn wasmtime_table_size(store: WasmtimeStoreContext<'_>, table: &Table) -> u32 { +pub extern "C" fn wasmtime_table_size(store: WasmtimeStoreContext<'_>, table: &Table) -> u64 { table.size(store) } @@ -178,9 +182,9 @@ pub extern "C" fn wasmtime_table_size(store: WasmtimeStoreContext<'_>, table: &T pub unsafe extern "C" fn wasmtime_table_grow( mut store: WasmtimeStoreContextMut<'_>, table: &Table, - delta: u32, + delta: u64, val: &wasmtime_val_t, - prev_size: &mut u32, + prev_size: &mut u64, ) -> Option> { let mut scope = RootScope::new(&mut store); handle_result( diff --git a/crates/c-api/src/types/table.rs 
b/crates/c-api/src/types/table.rs index a31b5ef10720..0f0a87e70de8 100644 --- a/crates/c-api/src/types/table.rs +++ b/crates/c-api/src/types/table.rs @@ -74,8 +74,8 @@ pub extern "C" fn wasm_tabletype_element(tt: &wasm_tabletype_t) -> &wasm_valtype pub extern "C" fn wasm_tabletype_limits(tt: &wasm_tabletype_t) -> &wasm_limits_t { let tt = tt.ty(); tt.limits_cache.get_or_init(|| wasm_limits_t { - min: tt.ty.minimum(), - max: tt.ty.maximum().unwrap_or(u32::max_value()), + min: u32::try_from(tt.ty.minimum()).unwrap(), + max: u32::try_from(tt.ty.maximum().unwrap_or(u64::from(u32::MAX))).unwrap(), }) } diff --git a/crates/cli-flags/src/lib.rs b/crates/cli-flags/src/lib.rs index 8a7bb6dc9b6b..0f92417cfd5e 100644 --- a/crates/cli-flags/src/lib.rs +++ b/crates/cli-flags/src/lib.rs @@ -118,7 +118,7 @@ wasmtime_option_group! { /// The maximum table elements for any table defined in a module when /// using the pooling allocator. - pub pooling_table_elements: Option, + pub pooling_table_elements: Option, /// The maximum size, in bytes, allocated for a core instance's metadata /// when using the pooling allocator. @@ -215,7 +215,7 @@ wasmtime_option_group! { /// WebAssembly modules to return -1 and fail. pub max_memory_size: Option, /// Maximum size, in table elements, that a table is allowed to reach. - pub max_table_elements: Option, + pub max_table_elements: Option, /// Maximum number of WebAssembly instances allowed to be created. pub max_instances: Option, /// Maximum number of WebAssembly tables allowed to be created. 
diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs index 12ec5698aa0d..16b32a7b6f88 100644 --- a/crates/cranelift/src/func_environ.rs +++ b/crates/cranelift/src/func_environ.rs @@ -13,8 +13,9 @@ use cranelift_frontend::FunctionBuilder; use cranelift_frontend::Variable; use cranelift_wasm::{ EngineOrModuleTypeIndex, FuncEnvironment as _, FuncIndex, FuncTranslationState, GlobalIndex, - GlobalVariable, Heap, HeapData, HeapStyle, MemoryIndex, TableData, TableIndex, TableSize, - TargetEnvironment, TypeIndex, WasmFuncType, WasmHeapTopType, WasmHeapType, WasmResult, + GlobalVariable, Heap, HeapData, HeapStyle, IndexType, Memory, MemoryIndex, Table, TableData, + TableIndex, TableSize, TargetEnvironment, TypeIndex, WasmFuncType, WasmHeapTopType, + WasmHeapType, WasmResult, }; use smallvec::SmallVec; use std::mem; @@ -672,27 +673,50 @@ impl<'module_environment> FuncEnvironment<'module_environment> { builder.switch_to_block(continuation_block); } - fn memory_index_type(&self, index: MemoryIndex) -> ir::Type { - if self.module.memory_plans[index].memory.memory64 { - I64 - } else { - I32 + /// Get the Memory for the given index. + fn memory(&self, index: MemoryIndex) -> Memory { + self.module.memory_plans[index].memory + } + + /// Get the Table for the given index. + fn table(&self, index: TableIndex) -> Table { + self.module.table_plans[index].table + } + + /// Cast the value to I64 and sign extend if necessary. + /// + /// Returns the value casted to I64. + fn cast_index_to_i64( + &self, + pos: &mut FuncCursor<'_>, + val: ir::Value, + index_type: IndexType, + ) -> ir::Value { + match index_type { + IndexType::I32 => pos.ins().uextend(I64, val), + IndexType::I64 => val, } } - /// Convert the target pointer-sized integer `val` that is holding a memory - /// length (or the `-1` `memory.grow`-failed sentinel) into the memory's - /// index type. + /// Convert the target pointer-sized integer `val` into the memory/table's index type. 
/// - /// This might involve extending or truncating it depending on the memory's + /// For memory, `val` is holding a memory length (or the `-1` `memory.grow`-failed sentinel). + /// For table, `val` is holding a table length. + /// + /// This might involve extending or truncating it depending on the memory/table's /// index type and the target's pointer type. - fn convert_memory_length_to_index_type( + fn convert_pointer_to_index_type( &self, mut pos: FuncCursor<'_>, val: ir::Value, - index: MemoryIndex, + index_type: IndexType, + // When it is a memory and the memory is using single-byte pages, + // we need to handle the tuncation differently. See comments below. + // + // When it is a table, this should be set to false. + single_byte_pages: bool, ) -> ir::Value { - let desired_type = self.memory_index_type(index); + let desired_type = index_type_to_ir_type(index_type); let pointer_type = self.pointer_type(); assert_eq!(pos.func.dfg.value_type(val), pointer_type); @@ -704,21 +728,24 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } else if pointer_type.bits() > desired_type.bits() { pos.ins().ireduce(desired_type, val) } else { - // We have a 64-bit memory on a 32-bit host -- this combo doesn't + // We have a 64-bit memory/table on a 32-bit host -- this combo doesn't // really make a whole lot of sense to do from a user perspective // but that is neither here nor there. We want to logically do an // unsigned extend *except* when we are given the `-1` sentinel, // which we must preserve as `-1` in the wider type. - match self.module.memory_plans[index].memory.page_size_log2 { - 16 => { + match single_byte_pages { + false => { // In the case that we have default page sizes, we can // always sign extend, since valid memory lengths (in pages) // never have their sign bit set, and so if the sign bit is // set then this must be the `-1` sentinel, which we want to // preserve through the extension. 
+ // + // When it comes to table, `single_byte_pages` should have always been set to false. + // Then we simply do a signed extension. pos.ins().sextend(desired_type, val) } - 0 => { + true => { // For single-byte pages, we have to explicitly check for // `-1` and choose whether to do an unsigned extension or // return a larger `-1` because there are valid memory @@ -728,24 +755,10 @@ impl<'module_environment> FuncEnvironment<'module_environment> { let is_failure = pos.ins().icmp_imm(IntCC::Equal, val, -1); pos.ins().select(is_failure, neg_one, extended) } - _ => unreachable!("only page sizes 2**0 and 2**16 are currently valid"), } } } - fn cast_memory_index_to_i64( - &self, - pos: &mut FuncCursor<'_>, - val: ir::Value, - index: MemoryIndex, - ) -> ir::Value { - if self.memory_index_type(index) == I64 { - val - } else { - pos.ins().uextend(I64, val) - } - } - /// Set up the necessary preamble definitions in `func` to access the table identified /// by `index`. /// @@ -784,18 +797,18 @@ impl<'module_environment> FuncEnvironment<'module_environment> { }; let table = &self.module.table_plans[index].table; - let element_size = if table.wasm_ty.is_vmgcref_type() { + let element_size = if table.ref_type.is_vmgcref_type() { // For GC-managed references, tables store `Option`s. ir::types::I32.bytes() } else { - self.reference_type(table.wasm_ty.heap_type).0.bytes() + self.reference_type(table.ref_type.heap_type).0.bytes() }; let base_gv = func.create_global_value(ir::GlobalValueData::Load { base: ptr, offset: Offset32::new(base_offset), global_type: pointer_type, - flags: if Some(table.minimum) == table.maximum { + flags: if Some(table.limits.min) == table.limits.max { // A fixed-size table can't be resized so its base address won't // change. 
MemFlags::trusted().with_readonly() @@ -804,9 +817,9 @@ impl<'module_environment> FuncEnvironment<'module_environment> { }, }); - let bound = if Some(table.minimum) == table.maximum { + let bound = if Some(table.limits.min) == table.limits.max { TableSize::Static { - bound: table.minimum, + bound: table.limits.min, } } else { TableSize::Dynamic { @@ -845,12 +858,7 @@ impl<'module_environment> FuncEnvironment<'module_environment> { // contents, we check for a null entry here, and // if null, we take a slow-path that invokes a // libcall. - let (table_entry_addr, flags) = table_data.prepare_table_addr( - builder, - index, - pointer_type, - self.isa.flags().enable_table_access_spectre_mitigation(), - ); + let (table_entry_addr, flags) = table_data.prepare_table_addr(&*self.isa, builder, index); let value = builder.ins().load(pointer_type, flags, table_entry_addr, 0); if !lazy_init { @@ -881,11 +889,13 @@ impl<'module_environment> FuncEnvironment<'module_environment> { builder.seal_block(null_block); builder.switch_to_block(null_block); + let index_type = self.table(table_index).idx_type; let table_index = builder.ins().iconst(I32, table_index.index() as i64); let lazy_init = self .builtin_functions .table_get_lazy_init_func_ref(builder.func); let vmctx = self.vmctx_val(&mut builder.cursor()); + let index = self.cast_index_to_i64(&mut builder.cursor(), index, index_type); let call_inst = builder.ins().call(lazy_init, &[vmctx, table_index, index]); let returned_entry = builder.func.dfg.inst_results(call_inst)[0]; builder.ins().jump(continuation_block, &[returned_entry]); @@ -1208,7 +1218,7 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { // Test if a type check is necessary for this table. If this table is a // table of typed functions and that type matches `ty_index`, then // there's no need to perform a typecheck. 
- match table.table.wasm_ty.heap_type { + match table.table.ref_type.heap_type { // Functions do not have a statically known type in the table, a // typecheck is required. Fall through to below to perform the // actual typecheck. @@ -1224,7 +1234,7 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { let specified_ty = self.env.module.types[ty_index]; if specified_ty == table_ty { return CheckIndirectCallTypeSignature::StaticMatch { - may_be_null: table.table.wasm_ty.nullable, + may_be_null: table.table.ref_type.nullable, }; } @@ -1236,7 +1246,7 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { // type information. If that fails due to the function being a // null pointer, then this was a call to null. Otherwise if it // succeeds then we know it won't match, so trap anyway. - if table.table.wasm_ty.nullable { + if table.table.ref_type.nullable { let mem_flags = ir::MemFlags::trusted().with_readonly(); self.builder.ins().load( sig_id_type, @@ -1252,7 +1262,7 @@ impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> { // Tables of `nofunc` can only be inhabited by null, so go ahead and // trap with that. WasmHeapType::NoFunc => { - assert!(table.table.wasm_ty.nullable); + assert!(table.table.ref_type.nullable); self.builder.ins().trap(ir::TrapCode::IndirectCallToNull); return CheckIndirectCallTypeSignature::StaticTrap; } @@ -1543,12 +1553,13 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m fn translate_table_grow( &mut self, - mut pos: cranelift_codegen::cursor::FuncCursor<'_>, + mut pos: FuncCursor<'_>, table_index: TableIndex, delta: ir::Value, init_value: ir::Value, ) -> WasmResult { - let ty = self.module.table_plans[table_index].table.wasm_ty.heap_type; + let table = self.table(table_index); + let ty = table.ref_type.heap_type; let grow = if ty.is_vmgcref_type() { gc::gc_ref_table_grow_builtin(ty, self, &mut pos.func)? 
} else { @@ -1558,12 +1569,14 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m let vmctx = self.vmctx_val(&mut pos); + let index_type = table.idx_type; + let delta = self.cast_index_to_i64(&mut pos, delta, index_type); let table_index_arg = pos.ins().iconst(I32, table_index.as_u32() as i64); let call_inst = pos .ins() .call(grow, &[vmctx, table_index_arg, delta, init_value]); - - Ok(pos.func.dfg.first_result(call_inst)) + let result = pos.func.dfg.first_result(call_inst); + Ok(self.convert_pointer_to_index_type(pos, result, index_type, false)) } fn translate_table_get( @@ -1573,34 +1586,25 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m index: ir::Value, ) -> WasmResult { let plan = &self.module.table_plans[table_index]; + let table = plan.table; self.ensure_table_exists(builder.func, table_index); let table_data = self.tables[table_index].as_ref().unwrap(); - let heap_ty = plan.table.wasm_ty.heap_type; + let heap_ty = table.ref_type.heap_type; match heap_ty.top() { // `i31ref`s never need barriers, and therefore don't need to go // through the GC compiler. WasmHeapTopType::Any if heap_ty == WasmHeapType::I31 => { - let (src, flags) = table_data.prepare_table_addr( - builder, - index, - self.pointer_type(), - self.isa.flags().enable_table_access_spectre_mitigation(), - ); + let (src, flags) = table_data.prepare_table_addr(&*self.isa, builder, index); gc::unbarriered_load_gc_ref(self, builder, WasmHeapType::I31, src, flags) } // GC-managed types. 
WasmHeapTopType::Any | WasmHeapTopType::Extern => { - let (src, flags) = table_data.prepare_table_addr( - builder, - index, - self.pointer_type(), - self.isa.flags().enable_table_access_spectre_mitigation(), - ); + let (src, flags) = table_data.prepare_table_addr(&*self.isa, builder, index); gc::gc_compiler(self).translate_read_gc_reference( self, builder, - plan.table.wasm_ty, + table.ref_type, src, flags, ) @@ -1627,36 +1631,26 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m value: ir::Value, index: ir::Value, ) -> WasmResult<()> { - let pointer_type = self.pointer_type(); let plan = &self.module.table_plans[table_index]; + let table = plan.table; self.ensure_table_exists(builder.func, table_index); let table_data = self.tables[table_index].as_ref().unwrap(); - let heap_ty = plan.table.wasm_ty.heap_type; + let heap_ty = table.ref_type.heap_type; match heap_ty.top() { // `i31ref`s never need GC barriers, and therefore don't need to go // through the GC compiler. WasmHeapTopType::Any if heap_ty == WasmHeapType::I31 => { - let (addr, flags) = table_data.prepare_table_addr( - builder, - index, - self.pointer_type(), - self.isa.flags().enable_table_access_spectre_mitigation(), - ); + let (addr, flags) = table_data.prepare_table_addr(&*self.isa, builder, index); gc::unbarriered_store_gc_ref(self, builder, WasmHeapType::I31, addr, value, flags) } // GC-managed types. 
WasmHeapTopType::Any | WasmHeapTopType::Extern => { - let (dst, flags) = table_data.prepare_table_addr( - builder, - index, - self.pointer_type(), - self.isa.flags().enable_table_access_spectre_mitigation(), - ); + let (dst, flags) = table_data.prepare_table_addr(&*self.isa, builder, index); gc::gc_compiler(self).translate_write_gc_reference( self, builder, - plan.table.wasm_ty, + table.ref_type, dst, value, flags, @@ -1667,12 +1661,8 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m WasmHeapTopType::Func => { match plan.style { TableStyle::CallerChecksSignature { lazy_init } => { - let (elem_addr, flags) = table_data.prepare_table_addr( - builder, - index, - pointer_type, - self.isa.flags().enable_table_access_spectre_mitigation(), - ); + let (elem_addr, flags) = + table_data.prepare_table_addr(&*self.isa, builder, index); // Set the "initialized bit". See doc-comment on // `FUNCREF_INIT_BIT` in // crates/environ/src/ref_bits.rs for details. @@ -1701,7 +1691,11 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m val: ir::Value, len: ir::Value, ) -> WasmResult<()> { - let ty = self.module.table_plans[table_index].table.wasm_ty.heap_type; + let table = self.table(table_index); + let index_type = table.idx_type; + let dst = self.cast_index_to_i64(&mut pos, dst, index_type); + let len = self.cast_index_to_i64(&mut pos, len, index_type); + let ty = table.ref_type.heap_type; let libcall = if ty.is_vmgcref_type() { gc::gc_ref_table_fill_builtin(ty, self, &mut pos.func)? } else { @@ -1848,7 +1842,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m // integer is the maximum memory64 size (2^64) which is one // larger than `u64::MAX` (2^64 - 1). In this case, just say the // minimum heap size is `u64::MAX`. 
- debug_assert_eq!(self.module.memory_plans[index].memory.minimum, 1 << 48); + debug_assert_eq!(self.module.memory_plans[index].memory.limits.min, 1 << 48); debug_assert_eq!(self.module.memory_plans[index].memory.page_size(), 1 << 16); u64::MAX }); @@ -2074,7 +2068,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m max_size, offset_guard_size, style: heap_style, - index_type: self.memory_index_type(index), + index_type: index_type_to_ir_type(self.memory(index).idx_type), memory_type, page_size_log2, })) @@ -2241,10 +2235,16 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m let memory_index = pos.ins().iconst(I32, index_arg as i64); let vmctx = self.vmctx_val(&mut pos); - let val = self.cast_memory_index_to_i64(&mut pos, val, index); + let index_type = self.memory(index).idx_type; + let val = self.cast_index_to_i64(&mut pos, val, index_type); let call_inst = pos.ins().call(memory_grow, &[vmctx, val, memory_index]); let result = *pos.func.dfg.inst_results(call_inst).first().unwrap(); - Ok(self.convert_memory_length_to_index_type(pos, result, index)) + let single_byte_pages = match self.memory(index).page_size_log2 { + 16 => false, + 0 => true, + _ => unreachable!("only page sizes 2**0 and 2**16 are currently valid"), + }; + Ok(self.convert_pointer_to_index_type(pos, result, index_type, single_byte_pages)) } fn translate_memory_size( @@ -2319,8 +2319,17 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m let page_size_log2 = i64::from(self.module.memory_plans[index].memory.page_size_log2); let current_length_in_pages = pos.ins().ushr_imm(current_length_in_bytes, page_size_log2); - - Ok(self.convert_memory_length_to_index_type(pos, current_length_in_pages, index)) + let single_byte_pages = match page_size_log2 { + 16 => false, + 0 => true, + _ => unreachable!("only page sizes 2**0 and 2**16 are currently valid"), + }; + Ok(self.convert_pointer_to_index_type( + pos, + 
current_length_in_pages, + self.memory(index).idx_type, + single_byte_pages, + )) } fn translate_memory_copy( @@ -2337,15 +2346,15 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m let vmctx = self.vmctx_val(&mut pos); let memory_copy = self.builtin_functions.memory_copy(&mut pos.func); - let dst = self.cast_memory_index_to_i64(&mut pos, dst, dst_index); - let src = self.cast_memory_index_to_i64(&mut pos, src, src_index); + let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(dst_index).idx_type); + let src = self.cast_index_to_i64(&mut pos, src, self.memory(src_index).idx_type); // The length is 32-bit if either memory is 32-bit, but if they're both // 64-bit then it's 64-bit. Our intrinsic takes a 64-bit length for // compatibility across all memories, so make sure that it's cast // correctly here (this is a bit special so no generic helper unlike for // `dst`/`src` above) - let len = if self.memory_index_type(dst_index) == I64 - && self.memory_index_type(src_index) == I64 + let len = if index_type_to_ir_type(self.memory(dst_index).idx_type) == I64 + && index_type_to_ir_type(self.memory(src_index).idx_type) == I64 { len } else { @@ -2369,8 +2378,8 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m len: ir::Value, ) -> WasmResult<()> { let memory_fill = self.builtin_functions.memory_fill(&mut pos.func); - let dst = self.cast_memory_index_to_i64(&mut pos, dst, memory_index); - let len = self.cast_memory_index_to_i64(&mut pos, len, memory_index); + let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(memory_index).idx_type); + let len = self.cast_index_to_i64(&mut pos, len, self.memory(memory_index).idx_type); let memory_index_arg = pos.ins().iconst(I32, i64::from(memory_index.as_u32())); let vmctx = self.vmctx_val(&mut pos); @@ -2398,7 +2407,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m let vmctx = self.vmctx_val(&mut pos); - let dst = 
self.cast_memory_index_to_i64(&mut pos, dst, memory_index); + let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(memory_index).idx_type); pos.ins().call( memory_init, @@ -2423,7 +2432,8 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m ) -> WasmResult { self.ensure_table_exists(pos.func, table_index); let table_data = self.tables[table_index].as_ref().unwrap(); - Ok(table_data.bound.bound(pos, ir::types::I32)) + let index_type = index_type_to_ir_type(self.table(table_index).idx_type); + Ok(table_data.bound.bound(&*self.isa, pos, index_type)) } fn translate_table_copy( @@ -2438,6 +2448,15 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m let (table_copy, dst_table_index_arg, src_table_index_arg) = self.get_table_copy_func(&mut pos.func, dst_table_index, src_table_index); + let dst = self.cast_index_to_i64(&mut pos, dst, self.table(dst_table_index).idx_type); + let src = self.cast_index_to_i64(&mut pos, src, self.table(src_table_index).idx_type); + let len = if index_type_to_ir_type(self.table(dst_table_index).idx_type) == I64 + && index_type_to_ir_type(self.table(src_table_index).idx_type) == I64 + { + len + } else { + pos.ins().uextend(I64, len) + }; let dst_table_index_arg = pos.ins().iconst(I32, dst_table_index_arg as i64); let src_table_index_arg = pos.ins().iconst(I32, src_table_index_arg as i64); let vmctx = self.vmctx_val(&mut pos); @@ -2469,6 +2488,11 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m let table_index_arg = pos.ins().iconst(I32, i64::from(table_index.as_u32())); let seg_index_arg = pos.ins().iconst(I32, i64::from(seg_index)); let vmctx = self.vmctx_val(&mut pos); + let index_type = self.table(table_index).idx_type; + let dst = self.cast_index_to_i64(&mut pos, dst, index_type); + let src = pos.ins().uextend(I64, src); + let len = pos.ins().uextend(I64, len); + pos.ins().call( table_init, &[vmctx, table_index_arg, seg_index_arg, dst, 
src, len], @@ -2496,7 +2520,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m ) -> WasmResult { #[cfg(feature = "threads")] { - let addr = self.cast_memory_index_to_i64(&mut pos, addr, memory_index); + let addr = self.cast_index_to_i64(&mut pos, addr, self.memory(memory_index).idx_type); let implied_ty = pos.func.dfg.value_type(expected); let (wait_func, memory_index) = self.get_memory_atomic_wait(&mut pos.func, memory_index, implied_ty); @@ -2531,7 +2555,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m ) -> WasmResult { #[cfg(feature = "threads")] { - let addr = self.cast_memory_index_to_i64(&mut pos, addr, memory_index); + let addr = self.cast_index_to_i64(&mut pos, addr, self.memory(memory_index).idx_type); let atomic_notify = self.builtin_functions.memory_atomic_notify(&mut pos.func); let memory_index_arg = pos.ins().iconst(I32, memory_index.index() as i64); @@ -2755,3 +2779,14 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m } } } + +// Helper function to convert an `IndexType` to an `ir::Type`. +// +// Implementing From/Into trait for `IndexType` or `ir::Type` would +// introduce an extra dependency between `wasmtime_types` and `cranelift_codegen`. +fn index_type_to_ir_type(index_type: IndexType) -> ir::Type { + match index_type { + IndexType::I32 => I32, + IndexType::I64 => I64, + } +} diff --git a/crates/environ/src/builtin.rs b/crates/environ/src/builtin.rs index e9411ac9ff28..79fdf8e73001 100644 --- a/crates/environ/src/builtin.rs +++ b/crates/environ/src/builtin.rs @@ -7,9 +7,9 @@ macro_rules! foreach_builtin_function { memory32_grow(vmctx: vmctx, delta: i64, index: i32) -> pointer; // Returns an index for wasm's `table.copy` when both tables are locally // defined. 
- table_copy(vmctx: vmctx, dst_index: i32, src_index: i32, dst: i32, src: i32, len: i32); + table_copy(vmctx: vmctx, dst_index: i32, src_index: i32, dst: i64, src: i64, len: i64); // Returns an index for wasm's `table.init`. - table_init(vmctx: vmctx, table: i32, elem: i32, dst: i32, src: i32, len: i32); + table_init(vmctx: vmctx, table: i32, elem: i32, dst: i64, src: i64, len: i64); // Returns an index for wasm's `elem.drop`. elem_drop(vmctx: vmctx, elem: i32); // Returns an index for wasm's `memory.copy` @@ -23,11 +23,11 @@ macro_rules! foreach_builtin_function { // Returns an index for wasm's `data.drop` instruction. data_drop(vmctx: vmctx, data: i32); // Returns a table entry after lazily initializing it. - table_get_lazy_init_func_ref(vmctx: vmctx, table: i32, index: i32) -> pointer; + table_get_lazy_init_func_ref(vmctx: vmctx, table: i32, index: i64) -> pointer; // Returns an index for Wasm's `table.grow` instruction for `funcref`s. - table_grow_func_ref(vmctx: vmctx, table: i32, delta: i32, init: pointer) -> i32; + table_grow_func_ref(vmctx: vmctx, table: i32, delta: i64, init: pointer) -> pointer; // Returns an index for Wasm's `table.fill` instruction for `funcref`s. - table_fill_func_ref(vmctx: vmctx, table: i32, dst: i32, val: pointer, len: i32); + table_fill_func_ref(vmctx: vmctx, table: i32, dst: i64, val: pointer, len: i64); // Returns an index for wasm's `memory.atomic.notify` instruction. #[cfg(feature = "threads")] memory_atomic_notify(vmctx: vmctx, memory: i32, addr: i64, count: i32) -> i32; @@ -91,11 +91,11 @@ macro_rules! foreach_builtin_function { // Returns an index for Wasm's `table.grow` instruction for GC references. #[cfg(feature = "gc")] - table_grow_gc_ref(vmctx: vmctx, table: i32, delta: i32, init: reference) -> i32; + table_grow_gc_ref(vmctx: vmctx, table: i32, delta: i64, init: reference) -> pointer; // Returns an index for Wasm's `table.fill` instruction for GC references. 
#[cfg(feature = "gc")] - table_fill_gc_ref(vmctx: vmctx, table: i32, dst: i32, val: reference, len: i32); + table_fill_gc_ref(vmctx: vmctx, table: i32, dst: i64, val: reference, len: i64); } }; } diff --git a/crates/environ/src/compile/module_environ.rs b/crates/environ/src/compile/module_environ.rs index ed4705d9b52f..a2648ecaaa5b 100644 --- a/crates/environ/src/compile/module_environ.rs +++ b/crates/environ/src/compile/module_environ.rs @@ -21,7 +21,9 @@ use wasmparser::{ FuncToValidate, FunctionBody, KnownCustom, NameSectionReader, Naming, Parser, Payload, TypeRef, Validator, ValidatorResources, }; -use wasmtime_types::{ConstExpr, ConstOp, ModuleInternedTypeIndex, SizeOverflow, WasmHeapTopType}; +use wasmtime_types::{ + ConstExpr, ConstOp, IndexType, ModuleInternedTypeIndex, SizeOverflow, WasmHeapTopType, +}; /// Object containing the standalone environment information. pub struct ModuleEnvironment<'a, 'data> { @@ -954,10 +956,14 @@ impl ModuleTranslation<'_> { } fn eval_offset(&mut self, memory_index: MemoryIndex, expr: &ConstExpr) -> Option { - let mem64 = self.module.memory_plans[memory_index].memory.memory64; - match expr.ops() { - &[ConstOp::I32Const(offset)] if !mem64 => Some(offset.unsigned().into()), - &[ConstOp::I64Const(offset)] if mem64 => Some(offset.unsigned()), + match ( + expr.ops(), + self.module.memory_plans[memory_index].memory.idx_type, + ) { + (&[ConstOp::I32Const(offset)], IndexType::I32) => { + Some(offset.unsigned().into()) + } + (&[ConstOp::I64Const(offset)], IndexType::I64) => Some(offset.unsigned()), _ => None, } } @@ -1137,7 +1143,7 @@ impl ModuleTranslation<'_> { // This should be large enough to support very large Wasm // modules with huge funcref tables, but small enough to avoid // OOMs or DoS on truly sparse tables. 
- const MAX_FUNC_TABLE_SIZE: u32 = 1024 * 1024; + const MAX_FUNC_TABLE_SIZE: u64 = 1024 * 1024; // First convert any element-initialized tables to images of just that // single function if the minimum size of the table allows doing so. @@ -1153,7 +1159,7 @@ impl ModuleTranslation<'_> { .skip(self.module.num_imported_tables), ) { - let table_size = plan.table.minimum; + let table_size = plan.table.limits.min; if table_size > MAX_FUNC_TABLE_SIZE { continue; } @@ -1194,7 +1200,8 @@ impl ModuleTranslation<'_> { // include it in the statically-built array of initial // contents. let offset = match segment.offset.ops() { - &[ConstOp::I32Const(offset)] => offset.unsigned(), + &[ConstOp::I32Const(offset)] => u64::from(offset.unsigned()), + &[ConstOp::I64Const(offset)] => offset.unsigned(), _ => break, }; @@ -1205,14 +1212,17 @@ impl ModuleTranslation<'_> { Some(top) => top, None => break, }; - let table_size = self.module.table_plans[segment.table_index].table.minimum; + let table_size = self.module.table_plans[segment.table_index] + .table + .limits + .min; if top > table_size || top > MAX_FUNC_TABLE_SIZE { break; } match self.module.table_plans[segment.table_index] .table - .wasm_ty + .ref_type .heap_type .top() { diff --git a/crates/environ/src/component/translate/inline.rs b/crates/environ/src/component/translate/inline.rs index d3a84da55ae2..afb6f629582f 100644 --- a/crates/environ/src/component/translate/inline.rs +++ b/crates/environ/src/component/translate/inline.rs @@ -48,6 +48,7 @@ use crate::component::translate::*; use std::borrow::Cow; use wasmparser::types::{ComponentAnyTypeId, ComponentCoreModuleTypeId}; +use wasmtime_types::IndexType; pub(super) fn run( types: &mut ComponentTypesBuilder, @@ -966,13 +967,19 @@ impl<'a> Inliner<'a> { InstanceModule::Static(idx) => match &memory.item { ExportItem::Index(i) => { let plan = &self.nested_modules[*idx].module.memory_plans[*i]; - plan.memory.memory64 + match plan.memory.idx_type { + IndexType::I32 => false, + 
IndexType::I64 => true, + } } ExportItem::Name(_) => unreachable!(), }, InstanceModule::Import(ty) => match &memory.item { ExportItem::Name(name) => match types[*ty].exports[name] { - wasmtime_types::EntityType::Memory(m) => m.memory64, + wasmtime_types::EntityType::Memory(m) => match m.idx_type { + IndexType::I32 => false, + IndexType::I64 => true, + }, _ => unreachable!(), }, ExportItem::Index(_) => unreachable!(), diff --git a/crates/environ/src/module.rs b/crates/environ/src/module.rs index e983333c3326..578f3a6bd7c5 100644 --- a/crates/environ/src/module.rs +++ b/crates/environ/src/module.rs @@ -413,10 +413,10 @@ pub enum TableSegmentElements { impl TableSegmentElements { /// Returns the number of elements in this segment. - pub fn len(&self) -> u32 { + pub fn len(&self) -> u64 { match self { - Self::Functions(s) => s.len() as u32, - Self::Expressions(s) => s.len() as u32, + Self::Functions(s) => u64::try_from(s.len()).unwrap(), + Self::Expressions(s) => u64::try_from(s.len()).unwrap(), } } } diff --git a/crates/environ/src/vmoffsets.rs b/crates/environ/src/vmoffsets.rs index 0868629cd362..c6c354d2e690 100644 --- a/crates/environ/src/vmoffsets.rs +++ b/crates/environ/src/vmoffsets.rs @@ -584,7 +584,7 @@ impl VMOffsets

{ /// The size of the `current_elements` field. #[inline] pub fn size_of_vmtable_definition_current_elements(&self) -> u8 { - 4 + self.pointer_size() } /// Return the size of `VMTableDefinition`. diff --git a/crates/fuzzing/src/generators/config.rs b/crates/fuzzing/src/generators/config.rs index d6fd64824af2..66b4abdb8042 100644 --- a/crates/fuzzing/src/generators/config.rs +++ b/crates/fuzzing/src/generators/config.rs @@ -404,11 +404,6 @@ impl<'a> Arbitrary<'a> for Config { config.disable_unimplemented_winch_proposals(); } - // Wasm-smith implements the most up-to-date version of memory64 where - // it supports 64-bit tables as well, but Wasmtime doesn't support that - // yet, so disable the memory64 proposal in fuzzing for now. - config.module_config.config.memory64_enabled = false; - // If using the pooling allocator, constrain the memory and module configurations // to the module limits. if let InstanceAllocationStrategy::Pooling(pooling) = &mut config.wasmtime.strategy { diff --git a/crates/fuzzing/src/generators/pooling_config.rs b/crates/fuzzing/src/generators/pooling_config.rs index 0e327bce3724..46e588ca05b4 100644 --- a/crates/fuzzing/src/generators/pooling_config.rs +++ b/crates/fuzzing/src/generators/pooling_config.rs @@ -14,7 +14,7 @@ pub struct PoolingAllocationConfig { pub total_stacks: u32, pub max_memory_size: usize, - pub table_elements: u32, + pub table_elements: usize, pub component_instance_size: usize, pub max_memories_per_component: u32, @@ -79,7 +79,7 @@ impl<'a> Arbitrary<'a> for PoolingAllocationConfig { const MAX_COUNT: u32 = 100; const MAX_TABLES: u32 = 100; const MAX_MEMORIES: u32 = 100; - const MAX_ELEMENTS: u32 = 1000; + const MAX_ELEMENTS: usize = 1000; const MAX_MEMORY_SIZE: usize = 10 * (1 << 20); // 10 MiB const MAX_SIZE: usize = 1 << 20; // 1 MiB const MAX_INSTANCE_MEMORIES: u32 = 10; diff --git a/crates/fuzzing/src/oracles.rs b/crates/fuzzing/src/oracles.rs index d28ebfd3c9f2..9b60e0da3044 100644 --- 
a/crates/fuzzing/src/oracles.rs +++ b/crates/fuzzing/src/oracles.rs @@ -116,8 +116,13 @@ impl ResourceLimiter for StoreLimits { Ok(self.alloc(desired - current)) } - fn table_growing(&mut self, current: u32, desired: u32, _maximum: Option) -> Result { - let delta = (desired - current) as usize * std::mem::size_of::(); + fn table_growing( + &mut self, + current: usize, + desired: usize, + _maximum: Option, + ) -> Result { + let delta = (desired - current) * std::mem::size_of::(); Ok(self.alloc(delta)) } } diff --git a/crates/types/src/error.rs b/crates/types/src/error.rs index 37e5043e0e1b..27c25bbcc036 100644 --- a/crates/types/src/error.rs +++ b/crates/types/src/error.rs @@ -1,5 +1,6 @@ -use alloc::string::String; +use alloc::string::{String, ToString}; use core::fmt; +use core::num::TryFromIntError; /// A WebAssembly translation error. /// @@ -52,6 +53,16 @@ impl From for WasmError { } } +impl From for WasmError { + /// Convert from a `TryFromIntError` to a `WasmError`. + fn from(e: TryFromIntError) -> Self { + Self::InvalidWebAssembly { + message: e.to_string(), + offset: 0, + } + } +} + /// A convenient alias for a `Result` that uses `WasmError` as the error type. pub type WasmResult = Result; diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index c5940b3e582c..7e1c69bbc698 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -1521,15 +1521,30 @@ impl ConstOp { } } +/// The type that can be used to index into [Memory] and [Table]. +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq, Serialize, Deserialize)] +pub enum IndexType { + I32, + I64, +} + +/// The size range of resizeable storage associated with [Memory] types and [Table] types. +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq, Serialize, Deserialize)] +pub struct Limits { + pub min: u64, + pub max: Option, +} + /// WebAssembly table. 
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq, Serialize, Deserialize)] pub struct Table { + /// The type of the index used to access the table. + pub idx_type: IndexType, + /// Tables are constrained by limits for their minimum and optionally maximum size. + /// The limits are given in numbers of entries. + pub limits: Limits, /// The table elements' Wasm type. - pub wasm_ty: WasmRefType, - /// The minimum number of elements in the table. - pub minimum: u32, - /// The maximum number of elements in the table. - pub maximum: Option, + pub ref_type: WasmRefType, } impl TypeTrace for Table { @@ -1538,9 +1553,9 @@ impl TypeTrace for Table { F: FnMut(EngineOrModuleTypeIndex) -> Result<(), E>, { let Table { - wasm_ty, - minimum: _, - maximum: _, + ref_type: wasm_ty, + idx_type: _, + limits: _, } = self; wasm_ty.trace(func) } @@ -1550,9 +1565,9 @@ impl TypeTrace for Table { F: FnMut(&mut EngineOrModuleTypeIndex) -> Result<(), E>, { let Table { - wasm_ty, - minimum: _, - maximum: _, + ref_type: wasm_ty, + idx_type: _, + limits: _, } = self; wasm_ty.trace_mut(func) } @@ -1561,14 +1576,13 @@ impl TypeTrace for Table { /// WebAssembly linear memory. #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq, Serialize, Deserialize)] pub struct Memory { - /// The minimum number of pages in the memory. - pub minimum: u64, - /// The maximum number of pages in the memory. - pub maximum: Option, + /// The type of the index used to access the memory. + pub idx_type: IndexType, + /// The limits constrain the minimum and optionally the maximum size of a memory. + /// The limits are given in units of page size. + pub limits: Limits, /// Whether the memory may be shared between multiple threads. pub shared: bool, - /// Whether or not this is a 64-bit memory - pub memory64: bool, /// The log2 of this memory's page size, in bytes. /// /// By default the page size is 64KiB (0x10000; 2**16; 1<<16; 65536) but the @@ -1598,7 +1612,8 @@ impl Memory { /// `u64` return type. 
This means that the memory can't be allocated but /// it's deferred to the caller to how to deal with that. pub fn minimum_byte_size(&self) -> Result { - self.minimum + self.limits + .min .checked_mul(self.page_size()) .ok_or(SizeOverflow) } @@ -1618,7 +1633,7 @@ impl Memory { /// `u64` return type. This means that the memory can't be allocated but /// it's deferred to the caller to how to deal with that. pub fn maximum_byte_size(&self) -> Result { - match self.maximum { + match self.limits.max { Some(max) => max.checked_mul(self.page_size()).ok_or(SizeOverflow), None => { let min = self.minimum_byte_size()?; @@ -1642,7 +1657,8 @@ impl Memory { /// /// For example 32-bit linear memories return `1<<32` from this method. pub fn max_size_based_on_index_type(&self) -> u64 { - if self.memory64 { + match self.idx_type { + IndexType::I64 => // Note that the true maximum size of a 64-bit linear memory, in // bytes, cannot be represented in a `u64`. That would require a u65 // to store `1<<64`. Despite that no system can actually allocate a @@ -1650,9 +1666,10 @@ impl Memory { // the kernel fit in a single Wasm page of linear memory". Shouldn't // ever actually be possible but it provides a number to serve as an // effective maximum. 
- 0_u64.wrapping_sub(self.page_size()) - } else { - WASM32_MAX_SIZE + { + 0_u64.wrapping_sub(self.page_size()) + } + IndexType::I32 => WASM32_MAX_SIZE, } } } @@ -1671,16 +1688,23 @@ impl std::error::Error for SizeOverflow {} impl From for Memory { fn from(ty: wasmparser::MemoryType) -> Memory { + let idx_type = match ty.memory64 { + false => IndexType::I32, + true => IndexType::I64, + }; + let limits = Limits { + min: ty.initial, + max: ty.maximum, + }; let page_size_log2 = u8::try_from(ty.page_size_log2.unwrap_or(16)).unwrap(); debug_assert!( page_size_log2 == 16 || page_size_log2 == 0, "invalid page_size_log2: {page_size_log2}; must be 16 or 0" ); Memory { - minimum: ty.initial, - maximum: ty.maximum, + idx_type, + limits, shared: ty.shared, - memory64: ty.memory64, page_size_log2, } } @@ -1715,13 +1739,18 @@ pub trait TypeConvert { /// Converts a wasmparser table type into a wasmtime type fn convert_table_type(&self, ty: &wasmparser::TableType) -> WasmResult { - if ty.table64 { - return Err(wasm_unsupported!("wasm memory64: 64-bit table type")); - } + let idx_type = match ty.table64 { + false => IndexType::I32, + true => IndexType::I64, + }; + let limits = Limits { + min: ty.initial.try_into().unwrap(), + max: ty.maximum.map(|i| i.try_into().unwrap()), + }; Ok(Table { - wasm_ty: self.convert_ref_type(ty.element_type), - minimum: ty.initial.try_into().unwrap(), - maximum: ty.maximum.map(|i| i.try_into().unwrap()), + idx_type, + limits, + ref_type: self.convert_ref_type(ty.element_type), }) } diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs index bcacf551bd2b..929f64e572fc 100644 --- a/crates/wasmtime/src/config.rs +++ b/crates/wasmtime/src/config.rs @@ -2813,7 +2813,7 @@ impl PoolingAllocationConfig { /// table; table elements are pointer-sized in the Wasmtime runtime. /// Therefore, the space reserved for each instance is `tables * /// table_elements * sizeof::<*const ()>`. 
- pub fn table_elements(&mut self, elements: u32) -> &mut Self { + pub fn table_elements(&mut self, elements: usize) -> &mut Self { self.config.limits.table_elements = elements; self } diff --git a/crates/wasmtime/src/runtime/externals/table.rs b/crates/wasmtime/src/runtime/externals/table.rs index bea26c42a674..a839a20cd59c 100644 --- a/crates/wasmtime/src/runtime/externals/table.rs +++ b/crates/wasmtime/src/runtime/externals/table.rs @@ -135,7 +135,7 @@ impl Table { fn wasmtime_table( &self, store: &mut StoreOpaque, - lazy_init_range: impl Iterator, + lazy_init_range: impl Iterator, ) -> *mut runtime::Table { unsafe { let export = &store[self.0]; @@ -153,7 +153,7 @@ impl Table { /// # Panics /// /// Panics if `store` does not own this table. - pub fn get(&self, mut store: impl AsContextMut, index: u32) -> Option { + pub fn get(&self, mut store: impl AsContextMut, index: u64) -> Option { let mut store = AutoAssertNoGc::new(store.as_context_mut().0); let table = self.wasmtime_table(&mut store, iter::once(index)); unsafe { @@ -203,7 +203,7 @@ impl Table { /// # Panics /// /// Panics if `store` does not own this table. - pub fn set(&self, mut store: impl AsContextMut, index: u32, val: Ref) -> Result<()> { + pub fn set(&self, mut store: impl AsContextMut, index: u64, val: Ref) -> Result<()> { let store = store.as_context_mut().0; let ty = self.ty(&store); let val = val.into_table_element(store, ty.element())?; @@ -220,12 +220,14 @@ impl Table { /// # Panics /// /// Panics if `store` does not own this table. 
- pub fn size(&self, store: impl AsContext) -> u32 { + pub fn size(&self, store: impl AsContext) -> u64 { self.internal_size(store.as_context().0) } - pub(crate) fn internal_size(&self, store: &StoreOpaque) -> u32 { - unsafe { (*store[self.0].definition).current_elements } + pub(crate) fn internal_size(&self, store: &StoreOpaque) -> u64 { + // unwrap here should be ok because the runtime should always guarantee + // that we can fit the number of elements in a 64-bit integer. + unsafe { u64::try_from((*store[self.0].definition).current_elements).unwrap() } } /// Grows the size of this table by `delta` more elements, initialization @@ -249,7 +251,7 @@ impl Table { /// (see also: [`Store::limiter_async`](`crate::Store::limiter_async`)). /// When using an async resource limiter, use [`Table::grow_async`] /// instead. - pub fn grow(&self, mut store: impl AsContextMut, delta: u32, init: Ref) -> Result { + pub fn grow(&self, mut store: impl AsContextMut, delta: u64, init: Ref) -> Result { let store = store.as_context_mut().0; let ty = self.ty(&store); let init = init.into_table_element(store, ty.element())?; @@ -259,7 +261,9 @@ impl Table { Some(size) => { let vm = (*table).vmtable(); *store[self.0].definition = vm; - Ok(size) + // unwrap here should be ok because the runtime should always guarantee + // that we can fit the table size in a 64-bit integer. 
+ Ok(u64::try_from(size).unwrap()) } None => bail!("failed to grow table by `{}`", delta), } @@ -277,9 +281,9 @@ impl Table { pub async fn grow_async( &self, mut store: impl AsContextMut, - delta: u32, + delta: u64, init: Ref, - ) -> Result + ) -> Result where T: Send, { @@ -308,10 +312,10 @@ impl Table { pub fn copy( mut store: impl AsContextMut, dst_table: &Table, - dst_index: u32, + dst_index: u64, src_table: &Table, - src_index: u32, - len: u32, + src_index: u64, + len: u64, ) -> Result<()> { let store = store.as_context_mut().0; @@ -326,7 +330,7 @@ impl Table { )?; let dst_table = dst_table.wasmtime_table(store, iter::empty()); - let src_range = src_index..(src_index.checked_add(len).unwrap_or(u32::MAX)); + let src_range = src_index..(src_index.checked_add(len).unwrap_or(u64::MAX)); let src_table = src_table.wasmtime_table(store, src_range); unsafe { runtime::Table::copy( @@ -358,7 +362,7 @@ impl Table { /// # Panics /// /// Panics if `store` does not own either `dst_table` or `src_table`. - pub fn fill(&self, mut store: impl AsContextMut, dst: u32, val: Ref, len: u32) -> Result<()> { + pub fn fill(&self, mut store: impl AsContextMut, dst: u64, val: Ref, len: u64) -> Result<()> { let store = store.as_context_mut().0; let ty = self.ty(&store); let val = val.into_table_element(store, ty.element())?; @@ -402,7 +406,7 @@ impl Table { wasmtime_export .table .table - .wasm_ty + .ref_type .canonicalize_for_runtime_usage(&mut |module_index| { crate::runtime::vm::Instance::from_vmctx(wasmtime_export.vmctx, |instance| { instance.engine_type_index(module_index) diff --git a/crates/wasmtime/src/runtime/limits.rs b/crates/wasmtime/src/runtime/limits.rs index 76faf312cd21..a3d3bad646ca 100644 --- a/crates/wasmtime/src/runtime/limits.rs +++ b/crates/wasmtime/src/runtime/limits.rs @@ -103,7 +103,12 @@ pub trait ResourceLimiter { /// /// See the details on the return values for `memory_growing` for what the /// return value of this function indicates. 
- fn table_growing(&mut self, current: u32, desired: u32, maximum: Option) -> Result; + fn table_growing( + &mut self, + current: usize, + desired: usize, + maximum: Option, + ) -> Result; /// Notifies the resource limiter that growing a linear memory, permitted by /// the `table_growing` method, has failed. @@ -185,9 +190,9 @@ pub trait ResourceLimiterAsync { /// Asynchronous version of [`ResourceLimiter::table_growing`] async fn table_growing( &mut self, - current: u32, - desired: u32, - maximum: Option, + current: usize, + desired: usize, + maximum: Option, ) -> Result; /// Identical to [`ResourceLimiter::table_grow_failed`] @@ -244,7 +249,7 @@ impl StoreLimitsBuilder { /// they're all allowed to reach up to the `limit` specified. /// /// By default, table elements will not be limited. - pub fn table_elements(mut self, limit: u32) -> Self { + pub fn table_elements(mut self, limit: usize) -> Self { self.0.table_elements = Some(limit); self } @@ -310,7 +315,7 @@ impl StoreLimitsBuilder { #[derive(Clone, Debug)] pub struct StoreLimits { memory_size: Option, - table_elements: Option, + table_elements: Option, instances: usize, tables: usize, memories: usize, @@ -360,7 +365,12 @@ impl ResourceLimiter for StoreLimits { } } - fn table_growing(&mut self, _current: u32, desired: u32, maximum: Option) -> Result { + fn table_growing( + &mut self, + _current: usize, + desired: usize, + maximum: Option, + ) -> Result { let allow = match self.table_elements { Some(limit) if desired > limit => false, _ => match maximum { diff --git a/crates/wasmtime/src/runtime/linker.rs b/crates/wasmtime/src/runtime/linker.rs index bb21e5422c80..28a2c124b47b 100644 --- a/crates/wasmtime/src/runtime/linker.rs +++ b/crates/wasmtime/src/runtime/linker.rs @@ -129,7 +129,7 @@ pub(crate) enum DefinitionType { // information but additionally the current size of the table/memory, as // this is used during linking since the min size specified in the type may // no longer be the current size of the 
table/memory. - Table(wasmtime_environ::Table, u32), + Table(wasmtime_environ::Table, u64), Memory(wasmtime_environ::Memory, u64), } diff --git a/crates/wasmtime/src/runtime/module.rs b/crates/wasmtime/src/runtime/module.rs index 2b277e254f00..f507aefc11f2 100644 --- a/crates/wasmtime/src/runtime/module.rs +++ b/crates/wasmtime/src/runtime/module.rs @@ -881,14 +881,14 @@ impl Module { .memory_plans .values() .skip(em.num_imported_memories) - .map(|plan| plan.memory.minimum) + .map(|plan| plan.memory.limits.min) .max(); let num_tables = u32::try_from(em.table_plans.len() - em.num_imported_tables).unwrap(); let max_initial_table_size = em .table_plans .values() .skip(em.num_imported_tables) - .map(|plan| plan.table.minimum) + .map(|plan| plan.table.limits.min) .max(); ResourcesRequired { num_memories, diff --git a/crates/wasmtime/src/runtime/resources.rs b/crates/wasmtime/src/runtime/resources.rs index 540f7b37b299..b9f7a6b21418 100644 --- a/crates/wasmtime/src/runtime/resources.rs +++ b/crates/wasmtime/src/runtime/resources.rs @@ -17,7 +17,7 @@ pub struct ResourcesRequired { /// The number of tables that are required. pub num_tables: u32, /// The maximum initial size required by any table. - pub max_initial_table_size: Option, + pub max_initial_table_size: Option, } impl ResourcesRequired { diff --git a/crates/wasmtime/src/runtime/store.rs b/crates/wasmtime/src/runtime/store.rs index 9d4e1215b2e9..60873e019e08 100644 --- a/crates/wasmtime/src/runtime/store.rs +++ b/crates/wasmtime/src/runtime/store.rs @@ -2479,9 +2479,9 @@ unsafe impl crate::runtime::vm::Store for StoreInner { fn table_growing( &mut self, - current: u32, - desired: u32, - maximum: Option, + current: usize, + desired: usize, + maximum: Option, ) -> Result { // Need to borrow async_cx before the mut borrow of the limiter. 
// self.async_cx() panicks when used with a non-async store, so diff --git a/crates/wasmtime/src/runtime/trampoline/table.rs b/crates/wasmtime/src/runtime/trampoline/table.rs index d96d6a2950bf..cbf580b578dc 100644 --- a/crates/wasmtime/src/runtime/trampoline/table.rs +++ b/crates/wasmtime/src/runtime/trampoline/table.rs @@ -11,9 +11,9 @@ pub fn create_table(store: &mut StoreOpaque, table: &TableType) -> Result) -> TableType { - let wasm_ty = element.to_wasm_type(); + let ref_type = element.to_wasm_type(); debug_assert!( - wasm_ty.is_canonicalized_for_runtime_usage(), - "should be canonicalized for runtime usage: {wasm_ty:?}" + ref_type.is_canonicalized_for_runtime_usage(), + "should be canonicalized for runtime usage: {ref_type:?}" + ); + + let limits = Limits { + min: u64::from(min), + max: max.map(|x| u64::from(x)), + }; + + TableType { + element, + ty: Table { + idx_type: IndexType::I32, + limits, + ref_type, + }, + } + } + + /// Crates a new descriptor for a 64-bit table. + /// + /// Note that 64-bit tables are part of the memory64 proposal for + /// WebAssembly which is not standardized yet. + pub fn new64(element: RefType, min: u64, max: Option) -> TableType { + let ref_type = element.to_wasm_type(); + + debug_assert!( + ref_type.is_canonicalized_for_runtime_usage(), + "should be canonicalized for runtime usage: {ref_type:?}" ); TableType { element, ty: Table { - wasm_ty, - minimum: min, - maximum: max, + ref_type, + idx_type: IndexType::I64, + limits: Limits { min, max }, }, } } + /// Returns whether or not this table is a 64-bit table. + /// + /// Note that 64-bit tables are part of the memory64 proposal for + /// WebAssembly which is not standardized yet. + pub fn is_64(&self) -> bool { + matches!(self.ty.idx_type, IndexType::I64) + } + /// Returns the element value type of this table. 
pub fn element(&self) -> &RefType { &self.element } /// Returns minimum number of elements this table must have - pub fn minimum(&self) -> u32 { - self.ty.minimum + pub fn minimum(&self) -> u64 { + self.ty.limits.min } /// Returns the optionally-specified maximum number of elements this table /// can have. /// /// If this returns `None` then the table is not limited in size. - pub fn maximum(&self) -> Option { - self.ty.maximum + pub fn maximum(&self) -> Option { + self.ty.limits.max } pub(crate) fn from_wasmtime_table(engine: &Engine, table: &Table) -> TableType { - let element = RefType::from_wasm_type(engine, &table.wasm_ty); + let element = RefType::from_wasm_type(engine, &table.ref_type); TableType { element, ty: *table, @@ -2540,10 +2575,9 @@ impl Default for MemoryTypeBuilder { fn default() -> Self { MemoryTypeBuilder { ty: Memory { - minimum: 0, - maximum: None, + idx_type: IndexType::I32, + limits: Limits { min: 0, max: None }, shared: false, - memory64: false, page_size_log2: Memory::DEFAULT_PAGE_SIZE_LOG2, }, } @@ -2552,7 +2586,12 @@ impl Default for MemoryTypeBuilder { impl MemoryTypeBuilder { fn validate(&self) -> Result<()> { - if self.ty.maximum.map_or(false, |max| max < self.ty.minimum) { + if self + .ty + .limits + .max + .map_or(false, |max| max < self.ty.limits.min) + { bail!("maximum page size cannot be smaller than the minimum page size"); } @@ -2565,7 +2604,7 @@ impl MemoryTypeBuilder { ), } - if self.ty.shared && self.ty.maximum.is_none() { + if self.ty.shared && self.ty.limits.max.is_none() { bail!("shared memories must have a maximum size"); } @@ -2594,7 +2633,7 @@ impl MemoryTypeBuilder { /// /// The default minimum is `0`. pub fn min(&mut self, minimum: u64) -> &mut Self { - self.ty.minimum = minimum; + self.ty.limits.min = minimum; self } @@ -2603,7 +2642,7 @@ impl MemoryTypeBuilder { /// /// The default maximum is `None`. 
pub fn max(&mut self, maximum: Option) -> &mut Self { - self.ty.maximum = maximum; + self.ty.limits.max = maximum; self } @@ -2617,7 +2656,10 @@ impl MemoryTypeBuilder { /// proposal](https://github.com/WebAssembly/memory64) for WebAssembly which /// is not fully standardized yet. pub fn memory64(&mut self, memory64: bool) -> &mut Self { - self.ty.memory64 = memory64; + self.ty.idx_type = match memory64 { + true => IndexType::I64, + false => IndexType::I32, + }; self } @@ -2757,7 +2799,7 @@ impl MemoryType { /// Note that 64-bit memories are part of the memory64 proposal for /// WebAssembly which is not standardized yet. pub fn is_64(&self) -> bool { - self.ty.memory64 + matches!(self.ty.idx_type, IndexType::I64) } /// Returns whether this is a shared memory or not. @@ -2773,7 +2815,7 @@ impl MemoryType { /// Note that the return value, while a `u64`, will always fit into a `u32` /// for 32-bit memories. pub fn minimum(&self) -> u64 { - self.ty.minimum + self.ty.limits.min } /// Returns the optionally-specified maximum number of pages this memory @@ -2784,7 +2826,7 @@ impl MemoryType { /// Note that the return value, while a `u64`, will always fit into a `u32` /// for 32-bit memories. pub fn maximum(&self) -> Option { - self.ty.maximum + self.ty.limits.max } /// This memory's page size, in bytes. 
diff --git a/crates/wasmtime/src/runtime/types/matching.rs b/crates/wasmtime/src/runtime/types/matching.rs index 3d675dc67c7c..96e9c36bb00b 100644 --- a/crates/wasmtime/src/runtime/types/matching.rs +++ b/crates/wasmtime/src/runtime/types/matching.rs @@ -2,8 +2,8 @@ use crate::type_registry::RegisteredType; use crate::{linker::DefinitionType, Engine, FuncType}; use crate::{prelude::*, ArrayType, StructType}; use wasmtime_environ::{ - EntityType, Global, Memory, ModuleTypes, Table, TypeTrace, VMSharedTypeIndex, WasmHeapType, - WasmRefType, WasmSubType, WasmValType, + EntityType, Global, IndexType, Memory, ModuleTypes, Table, TypeTrace, VMSharedTypeIndex, + WasmHeapType, WasmRefType, WasmSubType, WasmValType, }; pub struct MatchCx<'a> { @@ -151,17 +151,18 @@ fn global_ty(expected: &Global, actual: &Global) -> Result<()> { Ok(()) } -fn table_ty(expected: &Table, actual: &Table, actual_runtime_size: Option) -> Result<()> { +fn table_ty(expected: &Table, actual: &Table, actual_runtime_size: Option) -> Result<()> { equal_ty( - WasmValType::Ref(expected.wasm_ty), - WasmValType::Ref(actual.wasm_ty), + WasmValType::Ref(expected.ref_type), + WasmValType::Ref(actual.ref_type), "table", )?; + match_index(expected.idx_type, actual.idx_type, "table")?; match_limits( - expected.minimum.into(), - expected.maximum.map(|i| i.into()), - actual_runtime_size.unwrap_or(actual.minimum).into(), - actual.maximum.map(|i| i.into()), + expected.limits.min, + expected.limits.max, + actual_runtime_size.unwrap_or(actual.limits.min), + actual.limits.max, "table", )?; Ok(()) @@ -175,18 +176,12 @@ fn memory_ty(expected: &Memory, actual: &Memory, actual_runtime_size: Option Result<()> { + if expected == actual { + return Ok(()); + } + const S64: &str = "64-bit"; + const S32: &str = "32-bit"; + let expected = if matches!(expected, IndexType::I64) { + S64 + } else { + S32 + }; + let actual = if matches!(actual, IndexType::I64) { + S64 + } else { + S32 + }; + bail!( + "{desc} types incompatible: 
expected {expected} {desc}, \ + found {actual} {desc}", + ) +} + fn match_limits( expected_min: u64, expected_max: Option, diff --git a/crates/wasmtime/src/runtime/vm.rs b/crates/wasmtime/src/runtime/vm.rs index 21a274f7cdb6..5defd141d9dd 100644 --- a/crates/wasmtime/src/runtime/vm.rs +++ b/crates/wasmtime/src/runtime/vm.rs @@ -133,9 +133,9 @@ pub unsafe trait Store { /// table grow operation. fn table_growing( &mut self, - current: u32, - desired: u32, - maximum: Option, + current: usize, + desired: usize, + maximum: Option, ) -> Result; /// Callback invoked to notify the store's resource limiter that a table diff --git a/crates/wasmtime/src/runtime/vm/cow.rs b/crates/wasmtime/src/runtime/vm/cow.rs index a2d9f1afb77d..392ff6c1fb58 100644 --- a/crates/wasmtime/src/runtime/vm/cow.rs +++ b/crates/wasmtime/src/runtime/vm/cow.rs @@ -756,7 +756,7 @@ mod test { use crate::runtime::vm::mmap::Mmap; use crate::runtime::vm::sys::vm::decommit_pages; use std::sync::Arc; - use wasmtime_environ::Memory; + use wasmtime_environ::{IndexType, Limits, Memory}; fn create_memfd_with_data(offset: usize, data: &[u8]) -> Result { // Offset must be page-aligned. 
@@ -778,10 +778,9 @@ mod test { MemoryPlan { style, memory: Memory { - minimum: 0, - maximum: None, + idx_type: IndexType::I32, + limits: Limits { min: 0, max: None }, shared: false, - memory64: false, page_size_log2: Memory::DEFAULT_PAGE_SIZE_LOG2, }, pre_guard_size: 0, diff --git a/crates/wasmtime/src/runtime/vm/instance.rs b/crates/wasmtime/src/runtime/vm/instance.rs index bdf52e3d2432..c462e405c55c 100644 --- a/crates/wasmtime/src/runtime/vm/instance.rs +++ b/crates/wasmtime/src/runtime/vm/instance.rs @@ -198,7 +198,7 @@ impl Instance { let size = memory_plans .iter() .next() - .map(|plan| plan.1.memory.minimum) + .map(|plan| plan.1.memory.limits.min) .unwrap_or(0) * 64 * 1024; @@ -661,9 +661,9 @@ impl Instance { pub(crate) fn table_grow( &mut self, table_index: TableIndex, - delta: u32, + delta: u64, init_value: TableElement, - ) -> Result, Error> { + ) -> Result, Error> { self.with_defined_table_index_and_instance(table_index, |i, instance| { instance.defined_table_grow(i, delta, init_value) }) @@ -672,9 +672,9 @@ impl Instance { fn defined_table_grow( &mut self, table_index: DefinedTableIndex, - delta: u32, + delta: u64, init_value: TableElement, - ) -> Result, Error> { + ) -> Result, Error> { let store = unsafe { &mut *self.store() }; let table = &mut self .tables @@ -807,9 +807,9 @@ impl Instance { &mut self, table_index: TableIndex, elem_index: ElemIndex, - dst: u32, - src: u32, - len: u32, + dst: u64, + src: u64, + len: u64, ) -> Result<(), Trap> { // TODO: this `clone()` shouldn't be necessary but is used for now to // inform `rustc` that the lifetime of the elements here are @@ -837,9 +837,9 @@ impl Instance { const_evaluator: &mut ConstExprEvaluator, table_index: TableIndex, elements: &TableSegmentElements, - dst: u32, - src: u32, - len: u32, + dst: u64, + src: u64, + len: u64, ) -> Result<(), Trap> { // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init @@ -869,7 +869,7 @@ impl Instance { let mut context 
= ConstEvalContext::new(self, &module); match module.table_plans[table_index] .table - .wasm_ty + .ref_type .heap_type .top() { @@ -1075,7 +1075,7 @@ impl Instance { pub(crate) fn get_table_with_lazy_init( &mut self, table_index: TableIndex, - range: impl Iterator, + range: impl Iterator, ) -> *mut Table { self.with_defined_table_index_and_instance(table_index, |idx, instance| { instance.get_defined_table_with_lazy_init(idx, range) @@ -1089,7 +1089,7 @@ impl Instance { pub fn get_defined_table_with_lazy_init( &mut self, idx: DefinedTableIndex, - range: impl Iterator, + range: impl Iterator, ) -> *mut Table { let elt_ty = self.tables[idx].1.element_type(); @@ -1122,7 +1122,8 @@ impl Instance { TableInitialValue::Null { precomputed } => precomputed, TableInitialValue::Expr(_) => unreachable!(), }; - let func_index = precomputed.get(i as usize).cloned(); + // Panicking here helps catch bugs rather than silently truncating by accident. + let func_index = precomputed.get(usize::try_from(i).unwrap()).cloned(); let func_ref = func_index .and_then(|func_index| self.get_func_ref(func_index)) .unwrap_or(ptr::null_mut()); @@ -1384,7 +1385,7 @@ impl InstanceHandle { pub fn get_defined_table_with_lazy_init( &mut self, index: DefinedTableIndex, - range: impl Iterator, + range: impl Iterator, ) -> *mut Table { let index = self.instance().module().table_index(index); self.instance_mut().get_table_with_lazy_init(index, range) diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator.rs b/crates/wasmtime/src/runtime/vm/instance/allocator.rs index 61cb6fe4c1f4..9a5dd767bead 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator.rs @@ -543,7 +543,7 @@ fn check_table_init_bounds(instance: &mut Instance, module: &Module) -> Result<( let end = start.checked_add(usize::try_from(segment.elements.len()).unwrap()); match end { - Some(end) if end <= table.size() as usize => { + Some(end) if end <= table.size() => { // 
Initializer is in bounds } _ => { @@ -572,7 +572,7 @@ fn initialize_tables(instance: &mut Instance, module: &Module) -> Result<()> { }; let idx = module.table_index(table); let table = unsafe { instance.get_defined_table(table).as_mut().unwrap() }; - match module.table_plans[idx].table.wasm_ty.heap_type.top() { + match module.table_plans[idx].table.ref_type.heap_type.top() { WasmHeapTopType::Extern => { let gc_ref = VMGcRef::from_raw_u32(raw.get_externref()); let gc_store = unsafe { (*instance.store()).gc_store() }; @@ -618,7 +618,7 @@ fn initialize_tables(instance: &mut Instance, module: &Module) -> Result<()> { &mut const_evaluator, segment.table_index, &segment.elements, - start.get_u32(), + start.get_u64(), 0, segment.elements.len(), ) @@ -633,16 +633,15 @@ fn get_memory_init_start( instance: &mut Instance, module: &Module, ) -> Result { - let mem64 = instance.module().memory_plans[init.memory_index] - .memory - .memory64; let mut context = ConstEvalContext::new(instance, module); let mut const_evaluator = ConstExprEvaluator::default(); unsafe { const_evaluator.eval(&mut context, &init.offset) }.map(|v| { - if mem64 { - v.get_u64() - } else { - v.get_u32().into() + match instance.module().memory_plans[init.memory_index] + .memory + .idx_type + { + wasmtime_environ::IndexType::I32 => v.get_u32().into(), + wasmtime_environ::IndexType::I64 => v.get_u64(), } }) } @@ -703,15 +702,15 @@ fn initialize_memories(instance: &mut Instance, module: &Module) -> Result<()> { memory: wasmtime_environ::MemoryIndex, expr: &wasmtime_environ::ConstExpr, ) -> Option { - let mem64 = self.instance.module().memory_plans[memory].memory.memory64; let mut context = ConstEvalContext::new(self.instance, self.module); let val = unsafe { self.const_evaluator.eval(&mut context, expr) } .expect("const expression should be valid"); - Some(if mem64 { - val.get_u64() - } else { - val.get_u32().into() - }) + Some( + match self.instance.module().memory_plans[memory].memory.idx_type { + 
wasmtime_environ::IndexType::I32 => val.get_u32().into(), + wasmtime_environ::IndexType::I64 => val.get_u64(), + }, + ) } fn write( diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling.rs index 14c5e94ab302..85f89d2432d6 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling.rs @@ -131,7 +131,7 @@ pub struct InstanceLimits { pub max_tables_per_module: u32, /// Maximum number of table elements per table. - pub table_elements: u32, + pub table_elements: usize, /// Maximum number of linear memories per instance. pub max_memories_per_module: u32, diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/table_pool.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/table_pool.rs index c67d43214b63..b15d7301e285 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/table_pool.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/table_pool.rs @@ -34,7 +34,7 @@ impl TablePool { let table_size = round_up_to_pow2( mem::size_of::<*mut u8>() - .checked_mul(config.limits.table_elements as usize) + .checked_mul(config.limits.table_elements) .ok_or_else(|| anyhow!("table size exceeds addressable memory"))?, page_size, ); @@ -82,11 +82,11 @@ impl TablePool { } for (i, plan) in module.table_plans.iter().skip(module.num_imported_tables) { - if plan.table.minimum > u32::try_from(self.table_elements).unwrap() { + if plan.table.limits.min > u64::try_from(self.table_elements)? 
{ bail!( "table index {} has a minimum element size of {} which exceeds the limit of {}", i.as_u32(), - plan.table.minimum, + plan.table.limits.min, self.table_elements, ); } @@ -192,10 +192,7 @@ impl TablePool { assert!(table.is_static()); let base = self.get(allocation_index); - let size = round_up_to_pow2( - table.size() as usize * mem::size_of::<*mut u8>(), - self.page_size, - ); + let size = round_up_to_pow2(table.size() * mem::size_of::<*mut u8>(), self.page_size); // `memset` the first `keep_resident` bytes. let size_to_memset = size.min(self.keep_resident); diff --git a/crates/wasmtime/src/runtime/vm/libcalls.rs b/crates/wasmtime/src/runtime/vm/libcalls.rs index c976e28f5bf1..d80fb39d28bf 100644 --- a/crates/wasmtime/src/runtime/vm/libcalls.rs +++ b/crates/wasmtime/src/runtime/vm/libcalls.rs @@ -60,7 +60,7 @@ use crate::runtime::vm::vmcontext::VMFuncRef; use crate::runtime::vm::{Instance, TrapReason, VMGcRef}; #[cfg(feature = "threads")] use core::time::Duration; -use wasmtime_environ::{DataIndex, ElemIndex, FuncIndex, MemoryIndex, TableIndex, Trap, Unsigned}; +use wasmtime_environ::{DataIndex, ElemIndex, FuncIndex, MemoryIndex, TableIndex, Trap}; #[cfg(feature = "wmemcheck")] use wasmtime_wmemcheck::AccessError::{ DoubleMalloc, InvalidFree, InvalidRead, InvalidWrite, OutOfBounds, @@ -205,9 +205,9 @@ fn memory32_grow( unsafe fn table_grow_func_ref( instance: &mut Instance, table_index: u32, - delta: u32, + delta: u64, init_value: *mut u8, -) -> Result { +) -> Result<*mut u8> { let table_index = TableIndex::from_u32(table_index); let element = match instance.table_element_type(table_index) { @@ -215,10 +215,11 @@ unsafe fn table_grow_func_ref( TableElementType::GcRef => unreachable!(), }; - Ok(match instance.table_grow(table_index, delta, element)? { + let result = match instance.table_grow(table_index, delta, element)? 
{ Some(r) => r, - None => (-1_i32).unsigned(), - }) + None => usize::MAX, + }; + Ok(result as *mut _) } /// Implementation of `table.grow` for GC-reference tables. @@ -226,9 +227,9 @@ unsafe fn table_grow_func_ref( unsafe fn table_grow_gc_ref( instance: &mut Instance, table_index: u32, - delta: u32, + delta: u64, init_value: u32, -) -> Result { +) -> Result<*mut u8> { let table_index = TableIndex::from_u32(table_index); let element = match instance.table_element_type(table_index) { @@ -238,19 +239,20 @@ unsafe fn table_grow_gc_ref( .into(), }; - Ok(match instance.table_grow(table_index, delta, element)? { + let result = match instance.table_grow(table_index, delta, element)? { Some(r) => r, - None => (-1_i32).unsigned(), - }) + None => usize::MAX, + }; + Ok(result as *mut _) } /// Implementation of `table.fill` for `funcref`s. unsafe fn table_fill_func_ref( instance: &mut Instance, table_index: u32, - dst: u32, + dst: u64, val: *mut u8, - len: u32, + len: u64, ) -> Result<(), Trap> { let table_index = TableIndex::from_u32(table_index); let table = &mut *instance.get_table(table_index); @@ -267,9 +269,9 @@ unsafe fn table_fill_func_ref( unsafe fn table_fill_gc_ref( instance: &mut Instance, table_index: u32, - dst: u32, + dst: u64, val: u32, - len: u32, + len: u64, ) -> Result<(), Trap> { let table_index = TableIndex::from_u32(table_index); let table = &mut *instance.get_table(table_index); @@ -289,15 +291,15 @@ unsafe fn table_copy( instance: &mut Instance, dst_table_index: u32, src_table_index: u32, - dst: u32, - src: u32, - len: u32, + dst: u64, + src: u64, + len: u64, ) -> Result<(), Trap> { let dst_table_index = TableIndex::from_u32(dst_table_index); let src_table_index = TableIndex::from_u32(src_table_index); let dst_table = instance.get_table(dst_table_index); // Lazy-initialize the whole range in the source table first. 
- let src_range = src..(src.checked_add(len).unwrap_or(u32::MAX)); + let src_range = src..(src.checked_add(len).unwrap_or(u64::MAX)); let src_table = instance.get_table_with_lazy_init(src_table_index, src_range); let gc_store = (*instance.store()).gc_store(); Table::copy(gc_store, dst_table, src_table, dst, src, len) @@ -308,9 +310,9 @@ fn table_init( instance: &mut Instance, table_index: u32, elem_index: u32, - dst: u32, - src: u32, - len: u32, + dst: u64, + src: u64, + len: u64, ) -> Result<(), Trap> { let table_index = TableIndex::from_u32(table_index); let elem_index = ElemIndex::from_u32(elem_index); @@ -382,7 +384,7 @@ fn data_drop(instance: &mut Instance, data_index: u32) { unsafe fn table_get_lazy_init_func_ref( instance: &mut Instance, table_index: u32, - index: u32, + index: u64, ) -> *mut u8 { let table_index = TableIndex::from_u32(table_index); let table = instance.get_table_with_lazy_init(table_index, core::iter::once(index)); diff --git a/crates/wasmtime/src/runtime/vm/memory.rs b/crates/wasmtime/src/runtime/vm/memory.rs index fe84be375e14..40bf4e5a924a 100644 --- a/crates/wasmtime/src/runtime/vm/memory.rs +++ b/crates/wasmtime/src/runtime/vm/memory.rs @@ -653,7 +653,7 @@ impl Memory { if !store.memory_growing(0, minimum.unwrap_or(absolute_max), maximum)? 
{ bail!( "memory minimum size of {} pages exceeds memory limits", - plan.memory.minimum + plan.memory.limits.min ); } } @@ -664,7 +664,7 @@ impl Memory { let minimum = minimum.ok_or_else(|| { format_err!( "memory minimum size of {} pages exceeds memory limits", - plan.memory.minimum + plan.memory.limits.min ) })?; diff --git a/crates/wasmtime/src/runtime/vm/table.rs b/crates/wasmtime/src/runtime/vm/table.rs index 0e3b566326f7..8f89078082f0 100644 --- a/crates/wasmtime/src/runtime/vm/table.rs +++ b/crates/wasmtime/src/runtime/vm/table.rs @@ -7,13 +7,14 @@ use crate::prelude::*; use crate::runtime::vm::vmcontext::{VMFuncRef, VMTableDefinition}; use crate::runtime::vm::{GcStore, SendSyncPtr, Store, VMGcRef}; -use core::cmp; use core::ops::Range; use core::ptr::{self, NonNull}; use core::slice; +use core::{cmp, usize}; use sptr::Strict; use wasmtime_environ::{ - TablePlan, TableStyle, Trap, WasmHeapTopType, WasmRefType, FUNCREF_INIT_BIT, FUNCREF_MASK, + IndexType, TablePlan, TableStyle, Trap, WasmHeapTopType, WasmRefType, FUNCREF_INIT_BIT, + FUNCREF_MASK, }; /// An element going into or coming out of a table. @@ -160,7 +161,7 @@ pub struct StaticFuncTable { /// maximum size of the table. data: SendSyncPtr<[FuncTableElem]>, /// The current size of the table. - size: u32, + size: usize, /// Whether elements of this table are initialized lazily. lazy_init: bool, } @@ -170,7 +171,7 @@ pub struct StaticGcRefTable { /// maximum size of the table. data: SendSyncPtr<[Option]>, /// The current size of the table. - size: u32, + size: usize, } pub enum DynamicTable { @@ -195,7 +196,7 @@ pub struct DynamicFuncTable { /// vector is the current size of the table. elements: Vec, /// Maximum size that `elements` can grow to. - maximum: Option, + maximum: Option, /// Whether elements of this table are initialized lazily. lazy_init: bool, } @@ -205,7 +206,7 @@ pub struct DynamicGcRefTable { /// vector is the current size of the table. 
elements: Vec>, /// Maximum size that `elements` can grow to. - maximum: Option, + maximum: Option, } /// Represents an instance's table. @@ -268,19 +269,19 @@ fn wasm_to_table_type(ty: WasmRefType) -> TableElementType { impl Table { /// Create a new dynamic (movable) table instance for the specified table plan. pub fn new_dynamic(plan: &TablePlan, store: &mut dyn Store) -> Result { - Self::limit_new(plan, store)?; - let TableStyle::CallerChecksSignature { lazy_init } = plan.style; - match wasm_to_table_type(plan.table.wasm_ty) { - TableElementType::Func => Ok(Self::from(DynamicFuncTable { - elements: vec![None; usize::try_from(plan.table.minimum).unwrap()], - maximum: plan.table.maximum, - lazy_init, - })), + let (minimum, maximum) = Self::limit_new(plan, store)?; + match wasm_to_table_type(plan.table.ref_type) { + TableElementType::Func => { + let TableStyle::CallerChecksSignature { lazy_init } = plan.style; + Ok(Self::from(DynamicFuncTable { + elements: vec![None; minimum], + maximum, + lazy_init, + })) + } TableElementType::GcRef => Ok(Self::from(DynamicGcRefTable { - elements: (0..usize::try_from(plan.table.minimum).unwrap()) - .map(|_| None) - .collect(), - maximum: plan.table.maximum, + elements: (0..minimum).map(|_| None).collect(), + maximum, })), } } @@ -291,15 +292,11 @@ impl Table { data: SendSyncPtr<[u8]>, store: &mut dyn Store, ) -> Result { - Self::limit_new(plan, store)?; - - let size = plan.table.minimum; - let max = plan - .table - .maximum - .map_or(usize::MAX, |x| usize::try_from(x).unwrap()); + let (minimum, maximum) = Self::limit_new(plan, store)?; + let size = minimum; + let max = maximum.unwrap_or(usize::MAX); - match wasm_to_table_type(plan.table.wasm_ty) { + match wasm_to_table_type(plan.table.ref_type) { TableElementType::Func => { let len = { let data = data.as_non_null().as_ref(); @@ -309,10 +306,10 @@ impl Table { data.len() }; ensure!( - usize::try_from(plan.table.minimum).unwrap() <= len, + 
usize::try_from(plan.table.limits.min).unwrap() <= len, "initial table size of {} exceeds the pooling allocator's \ configured maximum table size of {len} elements", - plan.table.minimum, + plan.table.limits.min, ); let data = SendSyncPtr::new(NonNull::slice_from_raw_parts( data.as_non_null().cast::(), @@ -334,10 +331,10 @@ impl Table { data.len() }; ensure!( - usize::try_from(plan.table.minimum).unwrap() <= len, + usize::try_from(plan.table.limits.min).unwrap() <= len, "initial table size of {} exceeds the pooling allocator's \ configured maximum table size of {len} elements", - plan.table.minimum, + plan.table.limits.min, ); let data = SendSyncPtr::new(NonNull::slice_from_raw_parts( data.as_non_null().cast::>(), @@ -348,14 +345,45 @@ impl Table { } } - fn limit_new(plan: &TablePlan, store: &mut dyn Store) -> Result<()> { - if !store.table_growing(0, plan.table.minimum, plan.table.maximum)? { + // Calls the `store`'s limiter to optionally prevent the table from being created. + // + // Returns the minimum and maximum size of the table if the table can be created. + fn limit_new(plan: &TablePlan, store: &mut dyn Store) -> Result<(usize, Option)> { + // No matter how the table limits are specified + // The table size is limited by the host's pointer size + let absolute_max = usize::MAX; + + // If the minimum overflows the host's pointer size, then we can't satisfy this request. + // We defer the error to later so the `store` can be informed. + let minimum = usize::try_from(plan.table.limits.min).ok(); + + // The maximum size of the table is limited by: + // * the host's pointer size. + // * the table's maximum size if defined. + // * if the table is 64-bit. + let maximum = match (plan.table.limits.max, plan.table.idx_type) { + (Some(max), _) => usize::try_from(max).ok(), + (None, IndexType::I64) => usize::try_from(u64::MAX).ok(), + (None, IndexType::I32) => usize::try_from(u32::MAX).ok(), + }; + + // Inform the store's limiter what's about to happen. 
+ if !store.table_growing(0, minimum.unwrap_or(absolute_max), maximum)? { bail!( "table minimum size of {} elements exceeds table limits", - plan.table.minimum + plan.table.limits.min ); } - Ok(()) + + // At this point we need to actually handle overflows, so bail out with + // an error if we made it this far. + let minimum = minimum.ok_or_else(|| { + format_err!( + "table minimum size of {} elements exceeds table limits", + plan.table.limits.min + ) + })?; + Ok((minimum, maximum)) } /// Returns the type of the elements in this table. @@ -377,15 +405,13 @@ impl Table { } /// Returns the number of allocated elements. - pub fn size(&self) -> u32 { + pub fn size(&self) -> usize { match self { Table::Static(StaticTable::Func(StaticFuncTable { size, .. })) => *size, Table::Static(StaticTable::GcRef(StaticGcRefTable { size, .. })) => *size, - Table::Dynamic(DynamicTable::Func(DynamicFuncTable { elements, .. })) => { - elements.len().try_into().unwrap() - } + Table::Dynamic(DynamicTable::Func(DynamicFuncTable { elements, .. })) => elements.len(), Table::Dynamic(DynamicTable::GcRef(DynamicGcRefTable { elements, .. })) => { - elements.len().try_into().unwrap() + elements.len() } } } @@ -396,14 +422,10 @@ impl Table { /// /// The runtime maximum may not be equal to the maximum from the table's Wasm type /// when it is being constrained by an instance allocator. - pub fn maximum(&self) -> Option { + pub fn maximum(&self) -> Option { match self { - Table::Static(StaticTable::Func(StaticFuncTable { data, .. })) => { - Some(u32::try_from(data.len()).unwrap()) - } - Table::Static(StaticTable::GcRef(StaticGcRefTable { data, .. })) => { - Some(u32::try_from(data.len()).unwrap()) - } + Table::Static(StaticTable::Func(StaticFuncTable { data, .. })) => Some(data.len()), + Table::Static(StaticTable::GcRef(StaticGcRefTable { data, .. })) => Some(data.len()), Table::Dynamic(DynamicTable::Func(DynamicFuncTable { maximum, .. 
})) => *maximum, Table::Dynamic(DynamicTable::GcRef(DynamicGcRefTable { maximum, .. })) => *maximum, } @@ -416,7 +438,7 @@ impl Table { /// Panics if the table is not a function table. pub fn init_func( &mut self, - dst: u32, + dst: u64, items: impl ExactSizeIterator, ) -> Result<(), Trap> { let dst = usize::try_from(dst).map_err(|_| Trap::TableOutOfBounds)?; @@ -438,7 +460,7 @@ impl Table { /// Returns a trap error on out-of-bounds accesses. pub fn init_gc_refs( &mut self, - dst: u32, + dst: u64, items: impl ExactSizeIterator>, ) -> Result<(), Trap> { let dst = usize::try_from(dst).map_err(|_| Trap::TableOutOfBounds)?; @@ -465,16 +487,17 @@ impl Table { pub fn fill( &mut self, gc_store: &mut GcStore, - dst: u32, + dst: u64, val: TableElement, - len: u32, + len: u64, ) -> Result<(), Trap> { - let start = dst as usize; + let start = usize::try_from(dst).map_err(|_| Trap::TableOutOfBounds)?; + let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?; let end = start - .checked_add(len as usize) + .checked_add(len) .ok_or_else(|| Trap::TableOutOfBounds)?; - if end > self.size() as usize { + if end > self.size() { return Err(Trap::TableOutOfBounds); } @@ -526,10 +549,10 @@ impl Table { /// this unsafety. pub unsafe fn grow( &mut self, - delta: u32, + delta: u64, init_value: TableElement, store: &mut dyn Store, - ) -> Result, Error> { + ) -> Result, Error> { let old_size = self.size(); // Don't try to resize the table if its size isn't changing, just return @@ -537,6 +560,9 @@ impl Table { if delta == 0 { return Ok(Some(old_size)); } + // Cannot return `Trap::TableOutOfBounds` here becase `impl std::error::Error for Trap` is not available in no-std. + let delta = + usize::try_from(delta).map_err(|_| format_err!("delta exceeds host pointer size"))?; let new_size = match old_size.checked_add(delta) { Some(s) => s, @@ -566,17 +592,13 @@ impl Table { match self { Table::Static(StaticTable::Func(StaticFuncTable { data, size, .. 
})) => { unsafe { - debug_assert!(data.as_ref()[*size as usize..new_size as usize] - .iter() - .all(|x| x.is_none())); + debug_assert!(data.as_ref()[*size..new_size].iter().all(|x| x.is_none())); } *size = new_size; } Table::Static(StaticTable::GcRef(StaticGcRefTable { data, size })) => { unsafe { - debug_assert!(data.as_ref()[*size as usize..new_size as usize] - .iter() - .all(|x| x.is_none())); + debug_assert!(data.as_ref()[*size..new_size].iter().all(|x| x.is_none())); } *size = new_size; } @@ -596,8 +618,14 @@ impl Table { } } - self.fill(store.gc_store(), old_size, init_value, delta) - .expect("table should not be out of bounds"); + // casting to u64 is ok to unwrap + self.fill( + store.gc_store(), + u64::try_from(old_size).unwrap(), + init_value, + u64::try_from(delta).unwrap(), + ) + .expect("table should not be out of bounds"); Ok(Some(old_size)) } @@ -605,7 +633,7 @@ impl Table { /// Get reference to the specified element. /// /// Returns `None` if the index is out of bounds. - pub fn get(&self, gc_store: &mut GcStore, index: u32) -> Option { + pub fn get(&self, gc_store: &mut GcStore, index: u64) -> Option { let index = usize::try_from(index).ok()?; match self.element_type() { TableElementType::Func => { @@ -632,8 +660,8 @@ impl Table { /// # Panics /// /// Panics if `elem` is not of the right type for this table. 
- pub fn set(&mut self, index: u32, elem: TableElement) -> Result<(), ()> { - let index = usize::try_from(index).map_err(|_| ())?; + pub fn set(&mut self, index: u64, elem: TableElement) -> Result<(), ()> { + let index: usize = index.try_into().map_err(|_| ())?; match elem { TableElement::FuncRef(f) => { let (funcrefs, lazy_init) = self.funcrefs_mut(); @@ -660,12 +688,16 @@ impl Table { gc_store: &mut GcStore, dst_table: *mut Self, src_table: *mut Self, - dst_index: u32, - src_index: u32, - len: u32, + dst_index: u64, + src_index: u64, + len: u64, ) -> Result<(), Trap> { // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-copy + let src_index = usize::try_from(src_index).map_err(|_| Trap::TableOutOfBounds)?; + let dst_index = usize::try_from(dst_index).map_err(|_| Trap::TableOutOfBounds)?; + let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?; + if src_index .checked_add(len) .map_or(true, |n| n > (*src_table).size()) @@ -681,8 +713,8 @@ impl Table { "table element type mismatch" ); - let src_range = src_index as usize..src_index as usize + len as usize; - let dst_range = dst_index as usize..dst_index as usize + len as usize; + let src_range = src_index..src_index + len; + let dst_range = dst_index..dst_index + len; // Check if the tables are the same as we cannot mutably borrow and also borrow the same `RefCell` if ptr::eq(dst_table, src_table) { @@ -712,13 +744,13 @@ impl Table { Table::Dynamic(DynamicTable::Func(DynamicFuncTable { elements, .. })) => { VMTableDefinition { base: elements.as_mut_ptr().cast(), - current_elements: elements.len().try_into().unwrap(), + current_elements: elements.len(), } } Table::Dynamic(DynamicTable::GcRef(DynamicGcRefTable { elements, .. 
})) => { VMTableDefinition { base: elements.as_mut_ptr().cast(), - current_elements: elements.len().try_into().unwrap(), + current_elements: elements.len(), } } } diff --git a/crates/wasmtime/src/runtime/vm/vmcontext.rs b/crates/wasmtime/src/runtime/vm/vmcontext.rs index 5e96bb13c461..ca032b758c88 100644 --- a/crates/wasmtime/src/runtime/vm/vmcontext.rs +++ b/crates/wasmtime/src/runtime/vm/vmcontext.rs @@ -333,7 +333,7 @@ pub struct VMTableDefinition { pub base: *mut u8, /// The current number of elements in the table. - pub current_elements: u32, + pub current_elements: usize, } #[cfg(test)] diff --git a/crates/wast/src/spectest.rs b/crates/wast/src/spectest.rs index f69213af978a..924bf66d1c40 100644 --- a/crates/wast/src/spectest.rs +++ b/crates/wast/src/spectest.rs @@ -70,6 +70,10 @@ pub fn link_spectest( let table = Table::new(&mut *store, ty, Ref::Func(None))?; linker.define(&mut *store, "spectest", "table", table)?; + let ty = TableType::new64(RefType::FUNCREF, 10, Some(20)); + let table = Table::new(&mut *store, ty, Ref::Func(None))?; + linker.define(&mut *store, "spectest", "table64", table)?; + let ty = MemoryType::new(1, Some(2)); let memory = Memory::new(&mut *store, ty)?; linker.define(&mut *store, "spectest", "memory", memory)?; diff --git a/crates/wast/src/wast.rs b/crates/wast/src/wast.rs index d4d285088328..1e6304de4ce7 100644 --- a/crates/wast/src/wast.rs +++ b/crates/wast/src/wast.rs @@ -623,4 +623,5 @@ fn is_matching_assert_invalid_error_message(expected: &str, actual: &str) -> boo // for this scenario || (expected == "unknown global" && actual.contains("global.get of locally defined global")) || (expected == "immutable global" && actual.contains("global is immutable: cannot modify it with `global.set`")) + || (expected == "table size must be at most 2^32-1" && actual.contains("invalid u32 number: constant out of range")) } diff --git a/tests/all/cli_tests.rs b/tests/all/cli_tests.rs index 833efe56ea88..a8743c910785 100644 --- 
a/tests/all/cli_tests.rs +++ b/tests/all/cli_tests.rs @@ -926,7 +926,7 @@ fn table_growth_failure2() -> Result<()> { assert!(!output.status.success()); let stderr = String::from_utf8_lossy(&output.stderr); assert!( - stderr.contains("forcing a table growth failure to be a trap"), + stderr.contains("forcing trap when growing table to 4294967296 elements"), "bad stderr: {stderr}" ); Ok(()) diff --git a/tests/all/externals.rs b/tests/all/externals.rs index 1b36359c1e5b..43365457e0b2 100644 --- a/tests/all/externals.rs +++ b/tests/all/externals.rs @@ -29,7 +29,8 @@ fn bad_tables() { let ty = TableType::new(RefType::FUNCREF, 0, Some(1)); let t = Table::new(&mut store, ty.clone(), Ref::Func(None)).unwrap(); assert!(t.get(&mut store, 0).is_none()); - assert!(t.get(&mut store, u32::max_value()).is_none()); + assert!(t.get(&mut store, u64::from(u32::MAX)).is_none()); + assert!(t.get(&mut store, u64::MAX).is_none()); // set out of bounds or wrong type let ty = TableType::new(RefType::FUNCREF, 1, Some(1)); diff --git a/tests/all/limits.rs b/tests/all/limits.rs index 9150ce20de78..684d76de35dc 100644 --- a/tests/all/limits.rs +++ b/tests/all/limits.rs @@ -103,7 +103,7 @@ async fn test_limits_async() -> Result<()> { struct LimitsAsync { memory_size: usize, - table_elements: u32, + table_elements: usize, } #[async_trait::async_trait] impl ResourceLimiterAsync for LimitsAsync { @@ -117,9 +117,9 @@ async fn test_limits_async() -> Result<()> { } async fn table_growing( &mut self, - _current: u32, - desired: u32, - _maximum: Option, + _current: usize, + desired: usize, + _maximum: Option, ) -> Result { Ok(desired <= self.table_elements) } @@ -419,9 +419,9 @@ impl ResourceLimiter for MemoryContext { } fn table_growing( &mut self, - _current: u32, - _desired: u32, - _maximum: Option, + _current: usize, + _desired: usize, + _maximum: Option, ) -> Result { Ok(true) } @@ -533,9 +533,9 @@ impl ResourceLimiterAsync for MemoryContext { } async fn table_growing( &mut self, - _current: u32, - 
_desired: u32, - _maximum: Option, + _current: usize, + _desired: usize, + _maximum: Option, ) -> Result { Ok(true) } @@ -626,8 +626,8 @@ async fn test_custom_memory_limiter_async() -> Result<()> { } struct TableContext { - elements_used: u32, - element_limit: u32, + elements_used: usize, + element_limit: usize, limit_exceeded: bool, } @@ -640,9 +640,14 @@ impl ResourceLimiter for TableContext { ) -> Result { Ok(true) } - fn table_growing(&mut self, current: u32, desired: u32, maximum: Option) -> Result { + fn table_growing( + &mut self, + current: usize, + desired: usize, + maximum: Option, + ) -> Result { // Check if the desired exceeds a maximum (either from Wasm or from the host) - assert!(desired < maximum.unwrap_or(u32::MAX)); + assert!(desired < maximum.unwrap_or(usize::MAX)); assert_eq!(current, self.elements_used); Ok(if desired > self.element_limit { self.limit_exceeded = true; @@ -704,8 +709,8 @@ struct FailureDetector { /// Display impl of most recent call to memory_grow_failed memory_error: Option, /// Arguments of most recent call to table_growing - table_current: u32, - table_desired: u32, + table_current: usize, + table_desired: usize, /// Display impl of most recent call to table_grow_failed table_error: Option, } @@ -725,7 +730,12 @@ impl ResourceLimiter for FailureDetector { self.memory_error = Some(err.to_string()); Ok(()) } - fn table_growing(&mut self, current: u32, desired: u32, _maximum: Option) -> Result { + fn table_growing( + &mut self, + current: usize, + desired: usize, + _maximum: Option, + ) -> Result { self.table_current = current; self.table_desired = desired; Ok(true) @@ -832,9 +842,9 @@ impl ResourceLimiterAsync for FailureDetector { async fn table_growing( &mut self, - current: u32, - desired: u32, - _maximum: Option, + current: usize, + desired: usize, + _maximum: Option, ) -> Result { self.table_current = current; self.table_desired = desired; @@ -941,9 +951,9 @@ impl ResourceLimiter for Panic { } fn table_growing( &mut self, - 
_current: u32, - _desired: u32, - _maximum: Option, + _current: usize, + _desired: usize, + _maximum: Option, ) -> Result { panic!("resource limiter table growing"); } @@ -960,9 +970,9 @@ impl ResourceLimiterAsync for Panic { } async fn table_growing( &mut self, - _current: u32, - _desired: u32, - _maximum: Option, + _current: usize, + _desired: usize, + _maximum: Option, ) -> Result { panic!("async resource limiter table growing"); } diff --git a/tests/all/memory.rs b/tests/all/memory.rs index 24746f204faf..91865bfdb5b2 100644 --- a/tests/all/memory.rs +++ b/tests/all/memory.rs @@ -375,9 +375,9 @@ fn massive_64_bit_still_limited() -> Result<()> { } fn table_growing( &mut self, - _current: u32, - _request: u32, - _max: Option, + _current: usize, + _request: usize, + _max: Option, ) -> Result { unreachable!() } diff --git a/tests/all/pooling_allocator.rs b/tests/all/pooling_allocator.rs index aa95d0acf5ae..4bf8e0d4c21d 100644 --- a/tests/all/pooling_allocator.rs +++ b/tests/all/pooling_allocator.rs @@ -234,7 +234,7 @@ fn memory_zeroed() -> Result<()> { #[test] #[cfg_attr(miri, ignore)] fn table_limit() -> Result<()> { - const TABLE_ELEMENTS: u32 = 10; + const TABLE_ELEMENTS: usize = 10; let mut pool = crate::small_pool_config(); pool.table_elements(TABLE_ELEMENTS); let mut config = Config::new(); @@ -299,16 +299,16 @@ fn table_limit() -> Result<()> { let table = instance.get_table(&mut store, "t").unwrap(); for i in 0..TABLE_ELEMENTS { - assert_eq!(table.size(&store), i); + assert_eq!(table.size(&store), i as u64); assert_eq!( table .grow(&mut store, 1, Ref::Func(None)) .expect("table should grow"), - i + i as u64 ); } - assert_eq!(table.size(&store), TABLE_ELEMENTS); + assert_eq!(table.size(&store), TABLE_ELEMENTS as u64); assert!(table.grow(&mut store, 1, Ref::Func(None)).is_err()); Ok(()) @@ -527,7 +527,7 @@ fn drop_externref_global_during_module_init() -> Result<()> { Ok(false) } - fn table_growing(&mut self, _: u32, _: u32, _: Option) -> Result { + fn 
table_growing(&mut self, _: usize, _: usize, _: Option) -> Result { Ok(false) } } diff --git a/tests/disas/icall-loop.wat b/tests/disas/icall-loop.wat index 80d55efdb302..38689687810d 100644 --- a/tests/disas/icall-loop.wat +++ b/tests/disas/icall-loop.wat @@ -29,7 +29,7 @@ ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned readonly gv3+88 ;; sig0 = (i64 vmctx, i64) -> i32 tail -;; sig1 = (i64 vmctx, i32 uext, i32 uext) -> i64 system_v +;; sig1 = (i64 vmctx, i32 uext, i64) -> i64 system_v ;; fn0 = colocated u1:9 sig1 ;; stack_limit = gv2 ;; @@ -39,39 +39,39 @@ ;; @002b v10 = iconst.i64 0 ;; @002b v7 = load.i64 notrap aligned readonly v0+88 ;; @002b v6 = uextend.i64 v2 -;; v28 = iconst.i64 3 -;; @002b v8 = ishl v6, v28 ; v28 = 3 +;; v29 = iconst.i64 3 +;; @002b v8 = ishl v6, v29 ; v29 = 3 ;; @002b v9 = iadd v7, v8 ;; @002b v11 = select_spectre_guard v5, v10, v9 ; v10 = 0 -;; v29 = iconst.i64 -2 +;; v30 = iconst.i64 -2 ;; @002b v15 = iconst.i32 0 -;; @002b v19 = load.i64 notrap aligned readonly v0+80 -;; @002b v20 = load.i32 notrap aligned readonly v19 +;; @002b v20 = load.i64 notrap aligned readonly v0+80 +;; @002b v21 = load.i32 notrap aligned readonly v20 ;; @0027 jump block2 ;; ;; block2: ;; @002b v12 = load.i64 table_oob aligned table v11 -;; v30 = iconst.i64 -2 -;; v31 = band v12, v30 ; v30 = -2 -;; @002b brif v12, block5(v31), block4 +;; v31 = iconst.i64 -2 +;; v32 = band v12, v31 ; v31 = -2 +;; @002b brif v12, block5(v32), block4 ;; ;; block4 cold: -;; v32 = iconst.i32 0 -;; @002b v17 = call fn0(v0, v32, v2) ; v32 = 0 -;; @002b jump block5(v17) +;; v33 = iconst.i32 0 +;; @002b v18 = call fn0(v0, v33, v6) ; v33 = 0 +;; @002b jump block5(v18) ;; ;; block5(v14: i64): -;; @002b v21 = load.i32 icall_null aligned readonly v14+16 -;; @002b v22 = icmp eq v21, v20 -;; @002b brif v22, block7, block6 +;; @002b v22 = load.i32 icall_null aligned readonly v14+16 +;; @002b v23 = icmp eq v22, v21 +;; @002b brif v23, block7, block6 ;; ;; block6 cold: ;; @002b trap bad_sig ;; ;; 
block7: -;; @002b v23 = load.i64 notrap aligned readonly v14+8 -;; @002b v24 = load.i64 notrap aligned readonly v14+24 -;; @002b v25 = call_indirect sig0, v23(v24, v0) +;; @002b v24 = load.i64 notrap aligned readonly v14+8 +;; @002b v25 = load.i64 notrap aligned readonly v14+24 +;; @002b v26 = call_indirect sig0, v24(v25, v0) ;; @002e jump block2 ;; } ;; @@ -82,45 +82,45 @@ ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned readonly gv3+88 ;; sig0 = (i64 vmctx, i64) -> i32 tail -;; sig1 = (i64 vmctx, i32 uext, i32 uext) -> i64 system_v +;; sig1 = (i64 vmctx, i32 uext, i64) -> i64 system_v ;; fn0 = colocated u1:9 sig1 ;; stack_limit = gv2 ;; ;; block0(v0: i64, v1: i64): ;; @0038 v6 = load.i64 notrap aligned readonly v0+88 -;; v36 = iconst.i64 8 -;; @0038 v8 = iadd v6, v36 ; v36 = 8 -;; v27 = iconst.i64 -2 +;; v37 = iconst.i64 8 +;; @0038 v8 = iadd v6, v37 ; v37 = 8 +;; v28 = iconst.i64 -2 ;; @0038 v14 = iconst.i32 0 -;; @0036 v2 = iconst.i32 1 -;; @0038 v18 = load.i64 notrap aligned readonly v0+80 -;; @0038 v19 = load.i32 notrap aligned readonly v18 +;; v36 = iconst.i64 1 +;; @0038 v19 = load.i64 notrap aligned readonly v0+80 +;; @0038 v20 = load.i32 notrap aligned readonly v19 ;; @0034 jump block2 ;; ;; block2: -;; v37 = iadd.i64 v6, v36 ; v36 = 8 -;; @0038 v11 = load.i64 table_oob aligned table v37 -;; v38 = iconst.i64 -2 -;; v39 = band v11, v38 ; v38 = -2 -;; @0038 brif v11, block5(v39), block4 +;; v38 = iadd.i64 v6, v37 ; v37 = 8 +;; @0038 v11 = load.i64 table_oob aligned table v38 +;; v39 = iconst.i64 -2 +;; v40 = band v11, v39 ; v39 = -2 +;; @0038 brif v11, block5(v40), block4 ;; ;; block4 cold: -;; v40 = iconst.i32 0 -;; v41 = iconst.i32 1 -;; @0038 v16 = call fn0(v0, v40, v41) ; v40 = 0, v41 = 1 -;; @0038 jump block5(v16) +;; v41 = iconst.i32 0 +;; v42 = iconst.i64 1 +;; @0038 v17 = call fn0(v0, v41, v42) ; v41 = 0, v42 = 1 +;; @0038 jump block5(v17) ;; ;; block5(v13: i64): -;; @0038 v20 = load.i32 icall_null aligned readonly v13+16 -;; @0038 v21 = icmp eq v20, 
v19 -;; @0038 brif v21, block7, block6 +;; @0038 v21 = load.i32 icall_null aligned readonly v13+16 +;; @0038 v22 = icmp eq v21, v20 +;; @0038 brif v22, block7, block6 ;; ;; block6 cold: ;; @0038 trap bad_sig ;; ;; block7: -;; @0038 v22 = load.i64 notrap aligned readonly v13+8 -;; @0038 v23 = load.i64 notrap aligned readonly v13+24 -;; @0038 v24 = call_indirect sig0, v22(v23, v0) +;; @0038 v23 = load.i64 notrap aligned readonly v13+8 +;; @0038 v24 = load.i64 notrap aligned readonly v13+24 +;; @0038 v25 = call_indirect sig0, v23(v24, v0) ;; @003b jump block2 ;; } diff --git a/tests/disas/icall-simd.wat b/tests/disas/icall-simd.wat index 523948634e7e..17b4bcc1e272 100644 --- a/tests/disas/icall-simd.wat +++ b/tests/disas/icall-simd.wat @@ -15,7 +15,7 @@ ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned readonly gv3+88 ;; sig0 = (i64 vmctx, i64, i8x16) -> i8x16 tail -;; sig1 = (i64 vmctx, i32 uext, i32 uext) -> i64 system_v +;; sig1 = (i64 vmctx, i32 uext, i64) -> i64 system_v ;; fn0 = colocated u1:9 sig1 ;; stack_limit = gv2 ;; @@ -35,20 +35,21 @@ ;; block2 cold: ;; @0033 v16 = iconst.i32 0 ;; @0033 v17 = global_value.i64 gv3 -;; @0033 v18 = call fn0(v17, v16, v2) ; v16 = 0 -;; @0033 jump block3(v18) +;; @0033 v18 = uextend.i64 v2 +;; @0033 v19 = call fn0(v17, v16, v18) ; v16 = 0 +;; @0033 jump block3(v19) ;; ;; block3(v15: i64): -;; @0033 v19 = global_value.i64 gv3 -;; @0033 v20 = load.i64 notrap aligned readonly v19+80 -;; @0033 v21 = load.i32 notrap aligned readonly v20 -;; @0033 v22 = load.i32 icall_null aligned readonly v15+16 -;; @0033 v23 = icmp eq v22, v21 -;; @0033 trapz v23, bad_sig -;; @0033 v24 = load.i64 notrap aligned readonly v15+8 -;; @0033 v25 = load.i64 notrap aligned readonly v15+24 -;; @0033 v26 = call_indirect sig0, v24(v25, v0, v3) -;; @0036 jump block1(v26) +;; @0033 v20 = global_value.i64 gv3 +;; @0033 v21 = load.i64 notrap aligned readonly v20+80 +;; @0033 v22 = load.i32 notrap aligned readonly v21 +;; @0033 v23 = load.i32 icall_null aligned 
readonly v15+16 +;; @0033 v24 = icmp eq v23, v22 +;; @0033 trapz v24, bad_sig +;; @0033 v25 = load.i64 notrap aligned readonly v15+8 +;; @0033 v26 = load.i64 notrap aligned readonly v15+24 +;; @0033 v27 = call_indirect sig0, v25(v26, v0, v3) +;; @0036 jump block1(v27) ;; ;; block1(v4: i8x16): ;; @0036 return v4 diff --git a/tests/disas/icall.wat b/tests/disas/icall.wat index bdf6d722e9bc..83675583f1ed 100644 --- a/tests/disas/icall.wat +++ b/tests/disas/icall.wat @@ -15,7 +15,7 @@ ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned readonly gv3+88 ;; sig0 = (i64 vmctx, i64, f32) -> i32 tail -;; sig1 = (i64 vmctx, i32 uext, i32 uext) -> i64 system_v +;; sig1 = (i64 vmctx, i32 uext, i64) -> i64 system_v ;; fn0 = colocated u1:9 sig1 ;; stack_limit = gv2 ;; @@ -35,20 +35,21 @@ ;; block2 cold: ;; @0033 v16 = iconst.i32 0 ;; @0033 v17 = global_value.i64 gv3 -;; @0033 v18 = call fn0(v17, v16, v2) ; v16 = 0 -;; @0033 jump block3(v18) +;; @0033 v18 = uextend.i64 v2 +;; @0033 v19 = call fn0(v17, v16, v18) ; v16 = 0 +;; @0033 jump block3(v19) ;; ;; block3(v15: i64): -;; @0033 v19 = global_value.i64 gv3 -;; @0033 v20 = load.i64 notrap aligned readonly v19+80 -;; @0033 v21 = load.i32 notrap aligned readonly v20 -;; @0033 v22 = load.i32 icall_null aligned readonly v15+16 -;; @0033 v23 = icmp eq v22, v21 -;; @0033 trapz v23, bad_sig -;; @0033 v24 = load.i64 notrap aligned readonly v15+8 -;; @0033 v25 = load.i64 notrap aligned readonly v15+24 -;; @0033 v26 = call_indirect sig0, v24(v25, v0, v3) -;; @0036 jump block1(v26) +;; @0033 v20 = global_value.i64 gv3 +;; @0033 v21 = load.i64 notrap aligned readonly v20+80 +;; @0033 v22 = load.i32 notrap aligned readonly v21 +;; @0033 v23 = load.i32 icall_null aligned readonly v15+16 +;; @0033 v24 = icmp eq v23, v22 +;; @0033 trapz v24, bad_sig +;; @0033 v25 = load.i64 notrap aligned readonly v15+8 +;; @0033 v26 = load.i64 notrap aligned readonly v15+24 +;; @0033 v27 = call_indirect sig0, v25(v26, v0, v3) +;; @0036 jump block1(v27) ;; ;; 
block1(v4: i32): ;; @0036 return v4 diff --git a/tests/disas/indirect-call-no-caching.wat b/tests/disas/indirect-call-no-caching.wat index bb9e7e4fce76..7d63d0bc3fc4 100644 --- a/tests/disas/indirect-call-no-caching.wat +++ b/tests/disas/indirect-call-no-caching.wat @@ -69,7 +69,7 @@ ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned readonly gv3+88 ;; sig0 = (i64 vmctx, i64) -> i32 tail -;; sig1 = (i64 vmctx, i32 uext, i32 uext) -> i64 system_v +;; sig1 = (i64 vmctx, i32 uext, i64) -> i64 system_v ;; fn0 = colocated u1:9 sig1 ;; stack_limit = gv2 ;; @@ -89,20 +89,21 @@ ;; block2 cold: ;; @0050 v15 = iconst.i32 0 ;; @0050 v16 = global_value.i64 gv3 -;; @0050 v17 = call fn0(v16, v15, v2) ; v15 = 0 -;; @0050 jump block3(v17) +;; @0050 v17 = uextend.i64 v2 +;; @0050 v18 = call fn0(v16, v15, v17) ; v15 = 0 +;; @0050 jump block3(v18) ;; ;; block3(v14: i64): -;; @0050 v18 = global_value.i64 gv3 -;; @0050 v19 = load.i64 notrap aligned readonly v18+80 -;; @0050 v20 = load.i32 notrap aligned readonly v19 -;; @0050 v21 = load.i32 icall_null aligned readonly v14+16 -;; @0050 v22 = icmp eq v21, v20 -;; @0050 trapz v22, bad_sig -;; @0050 v23 = load.i64 notrap aligned readonly v14+8 -;; @0050 v24 = load.i64 notrap aligned readonly v14+24 -;; @0050 v25 = call_indirect sig0, v23(v24, v0) -;; @0053 jump block1(v25) +;; @0050 v19 = global_value.i64 gv3 +;; @0050 v20 = load.i64 notrap aligned readonly v19+80 +;; @0050 v21 = load.i32 notrap aligned readonly v20 +;; @0050 v22 = load.i32 icall_null aligned readonly v14+16 +;; @0050 v23 = icmp eq v22, v21 +;; @0050 trapz v23, bad_sig +;; @0050 v24 = load.i64 notrap aligned readonly v14+8 +;; @0050 v25 = load.i64 notrap aligned readonly v14+24 +;; @0050 v26 = call_indirect sig0, v24(v25, v0) +;; @0053 jump block1(v26) ;; ;; block1(v3: i32): ;; @0053 return v3 diff --git a/tests/disas/readonly-funcrefs.wat b/tests/disas/readonly-funcrefs.wat index 2346abfbfed5..c11f8e15eeb5 100644 --- a/tests/disas/readonly-funcrefs.wat +++ 
b/tests/disas/readonly-funcrefs.wat @@ -38,7 +38,7 @@ ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned readonly gv3+88 ;; sig0 = (i64 vmctx, i64) tail -;; sig1 = (i64 vmctx, i32 uext, i32 uext) -> i64 system_v +;; sig1 = (i64 vmctx, i32 uext, i64) -> i64 system_v ;; fn0 = colocated u1:9 sig1 ;; stack_limit = gv2 ;; @@ -48,34 +48,34 @@ ;; @0031 v9 = iconst.i64 0 ;; @0031 v6 = load.i64 notrap aligned readonly v0+88 ;; @0031 v5 = uextend.i64 v2 -;; v25 = iconst.i64 3 -;; @0031 v7 = ishl v5, v25 ; v25 = 3 +;; v26 = iconst.i64 3 +;; @0031 v7 = ishl v5, v26 ; v26 = 3 ;; @0031 v8 = iadd v6, v7 ;; @0031 v10 = select_spectre_guard v4, v9, v8 ; v9 = 0 ;; @0031 v11 = load.i64 table_oob aligned table v10 -;; v26 = iconst.i64 -2 -;; @0031 v12 = band v11, v26 ; v26 = -2 +;; v27 = iconst.i64 -2 +;; @0031 v12 = band v11, v27 ; v27 = -2 ;; @0031 brif v11, block3(v12), block2 ;; ;; block2 cold: ;; @0031 v14 = iconst.i32 0 -;; @0031 v16 = call fn0(v0, v14, v2) ; v14 = 0 -;; @0031 jump block3(v16) +;; @0031 v17 = call fn0(v0, v14, v5) ; v14 = 0 +;; @0031 jump block3(v17) ;; ;; block3(v13: i64): -;; @0031 v20 = load.i32 icall_null aligned readonly v13+16 -;; @0031 v18 = load.i64 notrap aligned readonly v0+80 -;; @0031 v19 = load.i32 notrap aligned readonly v18 -;; @0031 v21 = icmp eq v20, v19 -;; @0031 brif v21, block5, block4 +;; @0031 v21 = load.i32 icall_null aligned readonly v13+16 +;; @0031 v19 = load.i64 notrap aligned readonly v0+80 +;; @0031 v20 = load.i32 notrap aligned readonly v19 +;; @0031 v22 = icmp eq v21, v20 +;; @0031 brif v22, block5, block4 ;; ;; block4 cold: ;; @0031 trap bad_sig ;; ;; block5: -;; @0031 v22 = load.i64 notrap aligned readonly v13+8 -;; @0031 v23 = load.i64 notrap aligned readonly v13+24 -;; @0031 call_indirect sig0, v22(v23, v0) +;; @0031 v23 = load.i64 notrap aligned readonly v13+8 +;; @0031 v24 = load.i64 notrap aligned readonly v13+24 +;; @0031 call_indirect sig0, v23(v24, v0) ;; @0034 jump block1 ;; ;; block1: diff --git 
a/tests/disas/table-copy.wat b/tests/disas/table-copy.wat index c596de21a3ff..14c2d5b0a0a9 100644 --- a/tests/disas/table-copy.wat +++ b/tests/disas/table-copy.wat @@ -67,15 +67,18 @@ ;; gv1 = load.i64 notrap aligned readonly gv0+8 ;; gv2 = load.i64 notrap aligned gv1 ;; gv3 = vmctx -;; sig0 = (i64 vmctx, i32 uext, i32 uext, i32 uext, i32 uext, i32 uext) system_v +;; sig0 = (i64 vmctx, i32 uext, i32 uext, i64, i64, i64) system_v ;; fn0 = colocated u1:1 sig0 ;; stack_limit = gv2 ;; ;; block0(v0: i64, v1: i64, v2: i32, v3: i32, v4: i32, v5: i32): -;; @0090 v7 = iconst.i32 0 -;; @0090 v8 = iconst.i32 1 -;; @0090 v9 = global_value.i64 gv3 -;; @0090 call fn0(v9, v7, v8, v3, v4, v5) ; v7 = 0, v8 = 1 +;; @0090 v7 = uextend.i64 v3 +;; @0090 v8 = uextend.i64 v4 +;; @0090 v9 = uextend.i64 v5 +;; @0090 v10 = iconst.i32 0 +;; @0090 v11 = iconst.i32 1 +;; @0090 v12 = global_value.i64 gv3 +;; @0090 call fn0(v12, v10, v11, v7, v8, v9) ; v10 = 0, v11 = 1 ;; @0094 jump block1(v2) ;; ;; block1(v6: i32): @@ -87,15 +90,18 @@ ;; gv1 = load.i64 notrap aligned readonly gv0+8 ;; gv2 = load.i64 notrap aligned gv1 ;; gv3 = vmctx -;; sig0 = (i64 vmctx, i32 uext, i32 uext, i32 uext, i32 uext, i32 uext) system_v +;; sig0 = (i64 vmctx, i32 uext, i32 uext, i64, i64, i64) system_v ;; fn0 = colocated u1:1 sig0 ;; stack_limit = gv2 ;; ;; block0(v0: i64, v1: i64, v2: i32, v3: i32, v4: i32, v5: i32): -;; @009f v7 = iconst.i32 1 -;; @009f v8 = iconst.i32 0 -;; @009f v9 = global_value.i64 gv3 -;; @009f call fn0(v9, v7, v8, v3, v4, v5) ; v7 = 1, v8 = 0 +;; @009f v7 = uextend.i64 v3 +;; @009f v8 = uextend.i64 v4 +;; @009f v9 = uextend.i64 v5 +;; @009f v10 = iconst.i32 1 +;; @009f v11 = iconst.i32 0 +;; @009f v12 = global_value.i64 gv3 +;; @009f call fn0(v12, v10, v11, v7, v8, v9) ; v10 = 1, v11 = 0 ;; @00a3 jump block1(v2) ;; ;; block1(v6: i32): diff --git a/tests/disas/table-get.wat b/tests/disas/table-get.wat index 2a2207b1a7f3..c12ffdeeb427 100644 --- a/tests/disas/table-get.wat +++ 
b/tests/disas/table-get.wat @@ -21,98 +21,99 @@ ;; gv2 = load.i64 notrap aligned gv1 ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned gv3+88 -;; gv5 = load.i32 notrap aligned gv3+96 +;; gv5 = load.i64 notrap aligned gv3+96 ;; sig0 = (i64 vmctx, i32) -> i32 system_v ;; fn0 = colocated u1:26 sig0 ;; stack_limit = gv2 ;; ;; block0(v0: i64, v1: i64): ;; @0051 v3 = iconst.i32 0 -;; @0053 v4 = load.i32 notrap aligned v0+96 -;; @0053 v5 = icmp uge v3, v4 ; v3 = 0 -;; @0053 v6 = uextend.i64 v3 ; v3 = 0 -;; @0053 v7 = load.i64 notrap aligned v0+88 -;; v52 = iconst.i64 2 -;; @0053 v8 = ishl v6, v52 ; v52 = 2 -;; @0053 v9 = iadd v7, v8 -;; @0053 v10 = iconst.i64 0 -;; @0053 v11 = select_spectre_guard v5, v10, v9 ; v10 = 0 -;; @0053 v12 = load.i32 table_oob aligned table v11 -;; v53 = stack_addr.i64 ss0 -;; store notrap v12, v53 +;; @0053 v4 = load.i64 notrap aligned v0+96 +;; @0053 v5 = ireduce.i32 v4 +;; @0053 v6 = icmp uge v3, v5 ; v3 = 0 +;; @0053 v7 = uextend.i64 v3 ; v3 = 0 +;; @0053 v8 = load.i64 notrap aligned v0+88 +;; v53 = iconst.i64 2 +;; @0053 v9 = ishl v7, v53 ; v53 = 2 +;; @0053 v10 = iadd v8, v9 +;; @0053 v11 = iconst.i64 0 +;; @0053 v12 = select_spectre_guard v6, v11, v10 ; v11 = 0 +;; @0053 v13 = load.i32 table_oob aligned table v12 ;; v54 = stack_addr.i64 ss0 -;; v49 = load.i32 notrap v54 -;; v55 = iconst.i32 0 -;; @0053 v13 = icmp eq v49, v55 ; v55 = 0 -;; @0053 brif v13, block5, block2 +;; store notrap v13, v54 +;; v55 = stack_addr.i64 ss0 +;; v50 = load.i32 notrap v55 +;; v56 = iconst.i32 0 +;; @0053 v14 = icmp eq v50, v56 ; v56 = 0 +;; @0053 brif v14, block5, block2 ;; ;; block2: -;; @0053 v15 = load.i64 notrap aligned v0+56 -;; @0053 v16 = load.i64 notrap aligned v15 -;; @0053 v17 = load.i64 notrap aligned v15+8 -;; @0053 v18 = icmp eq v16, v17 -;; @0053 brif v18, block3, block4 +;; @0053 v16 = load.i64 notrap aligned v0+56 +;; @0053 v17 = load.i64 notrap aligned v16 +;; @0053 v18 = load.i64 notrap aligned v16+8 +;; @0053 v19 = icmp eq v17, v18 +;; @0053 
brif v19, block3, block4 ;; ;; block4: -;; @0053 v20 = load.i64 notrap aligned readonly v0+40 -;; @0053 v21 = load.i64 notrap aligned readonly v0+48 -;; v56 = stack_addr.i64 ss0 -;; v48 = load.i32 notrap v56 -;; @0053 v22 = uextend.i64 v48 -;; @0053 v23 = iconst.i64 8 -;; @0053 v24 = uadd_overflow_trap v22, v23, user65535 ; v23 = 8 -;; @0053 v25 = iconst.i64 8 -;; @0053 v26 = uadd_overflow_trap v24, v25, user65535 ; v25 = 8 -;; @0053 v27 = icmp ult v26, v21 -;; @0053 brif v27, block7, block6 +;; @0053 v21 = load.i64 notrap aligned readonly v0+40 +;; @0053 v22 = load.i64 notrap aligned readonly v0+48 +;; v57 = stack_addr.i64 ss0 +;; v49 = load.i32 notrap v57 +;; @0053 v23 = uextend.i64 v49 +;; @0053 v24 = iconst.i64 8 +;; @0053 v25 = uadd_overflow_trap v23, v24, user65535 ; v24 = 8 +;; @0053 v26 = iconst.i64 8 +;; @0053 v27 = uadd_overflow_trap v25, v26, user65535 ; v26 = 8 +;; @0053 v28 = icmp ult v27, v22 +;; @0053 brif v28, block7, block6 ;; ;; block6 cold: ;; @0053 trap user65535 ;; ;; block7: -;; @0053 v28 = iadd.i64 v20, v24 -;; @0053 v29 = load.i64 notrap aligned v28 -;; v57 = iconst.i64 1 -;; @0053 v30 = iadd v29, v57 ; v57 = 1 -;; @0053 v32 = load.i64 notrap aligned readonly v0+40 -;; @0053 v33 = load.i64 notrap aligned readonly v0+48 -;; v58 = stack_addr.i64 ss0 -;; v47 = load.i32 notrap v58 -;; @0053 v34 = uextend.i64 v47 -;; @0053 v35 = iconst.i64 8 -;; @0053 v36 = uadd_overflow_trap v34, v35, user65535 ; v35 = 8 -;; @0053 v37 = iconst.i64 8 -;; @0053 v38 = uadd_overflow_trap v36, v37, user65535 ; v37 = 8 -;; @0053 v39 = icmp ult v38, v33 -;; @0053 brif v39, block9, block8 +;; @0053 v29 = iadd.i64 v21, v25 +;; @0053 v30 = load.i64 notrap aligned v29 +;; v58 = iconst.i64 1 +;; @0053 v31 = iadd v30, v58 ; v58 = 1 +;; @0053 v33 = load.i64 notrap aligned readonly v0+40 +;; @0053 v34 = load.i64 notrap aligned readonly v0+48 +;; v59 = stack_addr.i64 ss0 +;; v48 = load.i32 notrap v59 +;; @0053 v35 = uextend.i64 v48 +;; @0053 v36 = iconst.i64 8 +;; @0053 v37 = 
uadd_overflow_trap v35, v36, user65535 ; v36 = 8 +;; @0053 v38 = iconst.i64 8 +;; @0053 v39 = uadd_overflow_trap v37, v38, user65535 ; v38 = 8 +;; @0053 v40 = icmp ult v39, v34 +;; @0053 brif v40, block9, block8 ;; ;; block8 cold: ;; @0053 trap user65535 ;; ;; block9: -;; @0053 v40 = iadd.i64 v32, v36 -;; @0053 store.i64 notrap aligned v30, v40 -;; v59 = stack_addr.i64 ss0 -;; v46 = load.i32 notrap v59 -;; @0053 store notrap aligned v46, v16 -;; v60 = iconst.i64 4 -;; @0053 v41 = iadd.i64 v16, v60 ; v60 = 4 -;; @0053 store notrap aligned v41, v15 +;; @0053 v41 = iadd.i64 v33, v37 +;; @0053 store.i64 notrap aligned v31, v41 +;; v60 = stack_addr.i64 ss0 +;; v47 = load.i32 notrap v60 +;; @0053 store notrap aligned v47, v17 +;; v61 = iconst.i64 4 +;; @0053 v42 = iadd.i64 v17, v61 ; v61 = 4 +;; @0053 store notrap aligned v42, v16 ;; @0053 jump block5 ;; ;; block3 cold: -;; v61 = stack_addr.i64 ss0 -;; v45 = load.i32 notrap v61 -;; @0053 v43 = call fn0(v0, v45), stack_map=[i32 @ ss0+0] +;; v62 = stack_addr.i64 ss0 +;; v46 = load.i32 notrap v62 +;; @0053 v44 = call fn0(v0, v46), stack_map=[i32 @ ss0+0] ;; @0053 jump block5 ;; ;; block5: -;; v62 = stack_addr.i64 ss0 -;; v44 = load.i32 notrap v62 +;; v63 = stack_addr.i64 ss0 +;; v45 = load.i32 notrap v63 ;; @0055 jump block1 ;; ;; block1: -;; @0055 return v44 +;; @0055 return v45 ;; } ;; ;; function u0:1(i64 vmctx, i64, i32) -> i32 tail { @@ -122,95 +123,96 @@ ;; gv2 = load.i64 notrap aligned gv1 ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned gv3+88 -;; gv5 = load.i32 notrap aligned gv3+96 +;; gv5 = load.i64 notrap aligned gv3+96 ;; sig0 = (i64 vmctx, i32) -> i32 system_v ;; fn0 = colocated u1:26 sig0 ;; stack_limit = gv2 ;; ;; block0(v0: i64, v1: i64, v2: i32): -;; @005a v4 = load.i32 notrap aligned v0+96 -;; @005a v5 = icmp uge v2, v4 -;; @005a v6 = uextend.i64 v2 -;; @005a v7 = load.i64 notrap aligned v0+88 -;; v52 = iconst.i64 2 -;; @005a v8 = ishl v6, v52 ; v52 = 2 -;; @005a v9 = iadd v7, v8 -;; @005a v10 = iconst.i64 
0 -;; @005a v11 = select_spectre_guard v5, v10, v9 ; v10 = 0 -;; @005a v12 = load.i32 table_oob aligned table v11 -;; v53 = stack_addr.i64 ss0 -;; store notrap v12, v53 +;; @005a v4 = load.i64 notrap aligned v0+96 +;; @005a v5 = ireduce.i32 v4 +;; @005a v6 = icmp uge v2, v5 +;; @005a v7 = uextend.i64 v2 +;; @005a v8 = load.i64 notrap aligned v0+88 +;; v53 = iconst.i64 2 +;; @005a v9 = ishl v7, v53 ; v53 = 2 +;; @005a v10 = iadd v8, v9 +;; @005a v11 = iconst.i64 0 +;; @005a v12 = select_spectre_guard v6, v11, v10 ; v11 = 0 +;; @005a v13 = load.i32 table_oob aligned table v12 ;; v54 = stack_addr.i64 ss0 -;; v49 = load.i32 notrap v54 -;; v55 = iconst.i32 0 -;; @005a v13 = icmp eq v49, v55 ; v55 = 0 -;; @005a brif v13, block5, block2 +;; store notrap v13, v54 +;; v55 = stack_addr.i64 ss0 +;; v50 = load.i32 notrap v55 +;; v56 = iconst.i32 0 +;; @005a v14 = icmp eq v50, v56 ; v56 = 0 +;; @005a brif v14, block5, block2 ;; ;; block2: -;; @005a v15 = load.i64 notrap aligned v0+56 -;; @005a v16 = load.i64 notrap aligned v15 -;; @005a v17 = load.i64 notrap aligned v15+8 -;; @005a v18 = icmp eq v16, v17 -;; @005a brif v18, block3, block4 +;; @005a v16 = load.i64 notrap aligned v0+56 +;; @005a v17 = load.i64 notrap aligned v16 +;; @005a v18 = load.i64 notrap aligned v16+8 +;; @005a v19 = icmp eq v17, v18 +;; @005a brif v19, block3, block4 ;; ;; block4: -;; @005a v20 = load.i64 notrap aligned readonly v0+40 -;; @005a v21 = load.i64 notrap aligned readonly v0+48 -;; v56 = stack_addr.i64 ss0 -;; v48 = load.i32 notrap v56 -;; @005a v22 = uextend.i64 v48 -;; @005a v23 = iconst.i64 8 -;; @005a v24 = uadd_overflow_trap v22, v23, user65535 ; v23 = 8 -;; @005a v25 = iconst.i64 8 -;; @005a v26 = uadd_overflow_trap v24, v25, user65535 ; v25 = 8 -;; @005a v27 = icmp ult v26, v21 -;; @005a brif v27, block7, block6 +;; @005a v21 = load.i64 notrap aligned readonly v0+40 +;; @005a v22 = load.i64 notrap aligned readonly v0+48 +;; v57 = stack_addr.i64 ss0 +;; v49 = load.i32 notrap v57 +;; @005a 
v23 = uextend.i64 v49 +;; @005a v24 = iconst.i64 8 +;; @005a v25 = uadd_overflow_trap v23, v24, user65535 ; v24 = 8 +;; @005a v26 = iconst.i64 8 +;; @005a v27 = uadd_overflow_trap v25, v26, user65535 ; v26 = 8 +;; @005a v28 = icmp ult v27, v22 +;; @005a brif v28, block7, block6 ;; ;; block6 cold: ;; @005a trap user65535 ;; ;; block7: -;; @005a v28 = iadd.i64 v20, v24 -;; @005a v29 = load.i64 notrap aligned v28 -;; v57 = iconst.i64 1 -;; @005a v30 = iadd v29, v57 ; v57 = 1 -;; @005a v32 = load.i64 notrap aligned readonly v0+40 -;; @005a v33 = load.i64 notrap aligned readonly v0+48 -;; v58 = stack_addr.i64 ss0 -;; v47 = load.i32 notrap v58 -;; @005a v34 = uextend.i64 v47 -;; @005a v35 = iconst.i64 8 -;; @005a v36 = uadd_overflow_trap v34, v35, user65535 ; v35 = 8 -;; @005a v37 = iconst.i64 8 -;; @005a v38 = uadd_overflow_trap v36, v37, user65535 ; v37 = 8 -;; @005a v39 = icmp ult v38, v33 -;; @005a brif v39, block9, block8 +;; @005a v29 = iadd.i64 v21, v25 +;; @005a v30 = load.i64 notrap aligned v29 +;; v58 = iconst.i64 1 +;; @005a v31 = iadd v30, v58 ; v58 = 1 +;; @005a v33 = load.i64 notrap aligned readonly v0+40 +;; @005a v34 = load.i64 notrap aligned readonly v0+48 +;; v59 = stack_addr.i64 ss0 +;; v48 = load.i32 notrap v59 +;; @005a v35 = uextend.i64 v48 +;; @005a v36 = iconst.i64 8 +;; @005a v37 = uadd_overflow_trap v35, v36, user65535 ; v36 = 8 +;; @005a v38 = iconst.i64 8 +;; @005a v39 = uadd_overflow_trap v37, v38, user65535 ; v38 = 8 +;; @005a v40 = icmp ult v39, v34 +;; @005a brif v40, block9, block8 ;; ;; block8 cold: ;; @005a trap user65535 ;; ;; block9: -;; @005a v40 = iadd.i64 v32, v36 -;; @005a store.i64 notrap aligned v30, v40 -;; v59 = stack_addr.i64 ss0 -;; v46 = load.i32 notrap v59 -;; @005a store notrap aligned v46, v16 -;; v60 = iconst.i64 4 -;; @005a v41 = iadd.i64 v16, v60 ; v60 = 4 -;; @005a store notrap aligned v41, v15 +;; @005a v41 = iadd.i64 v33, v37 +;; @005a store.i64 notrap aligned v31, v41 +;; v60 = stack_addr.i64 ss0 +;; v47 = 
load.i32 notrap v60 +;; @005a store notrap aligned v47, v17 +;; v61 = iconst.i64 4 +;; @005a v42 = iadd.i64 v17, v61 ; v61 = 4 +;; @005a store notrap aligned v42, v16 ;; @005a jump block5 ;; ;; block3 cold: -;; v61 = stack_addr.i64 ss0 -;; v45 = load.i32 notrap v61 -;; @005a v43 = call fn0(v0, v45), stack_map=[i32 @ ss0+0] +;; v62 = stack_addr.i64 ss0 +;; v46 = load.i32 notrap v62 +;; @005a v44 = call fn0(v0, v46), stack_map=[i32 @ ss0+0] ;; @005a jump block5 ;; ;; block5: -;; v62 = stack_addr.i64 ss0 -;; v44 = load.i32 notrap v62 +;; v63 = stack_addr.i64 ss0 +;; v45 = load.i32 notrap v63 ;; @005c jump block1 ;; ;; block1: -;; @005c return v44 +;; @005c return v45 ;; } diff --git a/tests/disas/table-set.wat b/tests/disas/table-set.wat index 54029fe7ceee..1aeb42ff1da7 100644 --- a/tests/disas/table-set.wat +++ b/tests/disas/table-set.wat @@ -22,114 +22,115 @@ ;; gv2 = load.i64 notrap aligned gv1 ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned gv3+88 -;; gv5 = load.i32 notrap aligned gv3+96 +;; gv5 = load.i64 notrap aligned gv3+96 ;; sig0 = (i64 vmctx, i32 uext) system_v ;; fn0 = colocated u1:25 sig0 ;; stack_limit = gv2 ;; ;; block0(v0: i64, v1: i64, v2: i32): ;; @0051 v3 = iconst.i32 0 -;; @0055 v4 = load.i32 notrap aligned v0+96 -;; @0055 v5 = icmp uge v3, v4 ; v3 = 0 -;; @0055 v6 = uextend.i64 v3 ; v3 = 0 -;; @0055 v7 = load.i64 notrap aligned v0+88 -;; v63 = iconst.i64 2 -;; @0055 v8 = ishl v6, v63 ; v63 = 2 -;; @0055 v9 = iadd v7, v8 -;; @0055 v10 = iconst.i64 0 -;; @0055 v11 = select_spectre_guard v5, v10, v9 ; v10 = 0 -;; @0055 v12 = load.i32 table_oob aligned table v11 -;; v64 = iconst.i32 0 -;; @0055 v13 = icmp eq v2, v64 ; v64 = 0 -;; @0055 brif v13, block3, block2 +;; @0055 v4 = load.i64 notrap aligned v0+96 +;; @0055 v5 = ireduce.i32 v4 +;; @0055 v6 = icmp uge v3, v5 ; v3 = 0 +;; @0055 v7 = uextend.i64 v3 ; v3 = 0 +;; @0055 v8 = load.i64 notrap aligned v0+88 +;; v64 = iconst.i64 2 +;; @0055 v9 = ishl v7, v64 ; v64 = 2 +;; @0055 v10 = iadd v8, v9 +;; 
@0055 v11 = iconst.i64 0 +;; @0055 v12 = select_spectre_guard v6, v11, v10 ; v11 = 0 +;; @0055 v13 = load.i32 table_oob aligned table v12 +;; v65 = iconst.i32 0 +;; @0055 v14 = icmp eq v2, v65 ; v65 = 0 +;; @0055 brif v14, block3, block2 ;; ;; block2: -;; @0055 v15 = load.i64 notrap aligned readonly v0+40 -;; @0055 v16 = load.i64 notrap aligned readonly v0+48 -;; @0055 v17 = uextend.i64 v2 -;; @0055 v18 = iconst.i64 8 -;; @0055 v19 = uadd_overflow_trap v17, v18, user65535 ; v18 = 8 -;; @0055 v20 = iconst.i64 8 -;; @0055 v21 = uadd_overflow_trap v19, v20, user65535 ; v20 = 8 -;; @0055 v22 = icmp ult v21, v16 -;; @0055 brif v22, block9, block8 +;; @0055 v16 = load.i64 notrap aligned readonly v0+40 +;; @0055 v17 = load.i64 notrap aligned readonly v0+48 +;; @0055 v18 = uextend.i64 v2 +;; @0055 v19 = iconst.i64 8 +;; @0055 v20 = uadd_overflow_trap v18, v19, user65535 ; v19 = 8 +;; @0055 v21 = iconst.i64 8 +;; @0055 v22 = uadd_overflow_trap v20, v21, user65535 ; v21 = 8 +;; @0055 v23 = icmp ult v22, v17 +;; @0055 brif v23, block9, block8 ;; ;; block8 cold: ;; @0055 trap user65535 ;; ;; block9: -;; @0055 v23 = iadd.i64 v15, v19 -;; @0055 v24 = load.i64 notrap aligned v23 -;; v65 = iconst.i64 1 -;; @0055 v25 = iadd v24, v65 ; v65 = 1 -;; @0055 v27 = load.i64 notrap aligned readonly v0+40 -;; @0055 v28 = load.i64 notrap aligned readonly v0+48 -;; @0055 v29 = uextend.i64 v2 -;; @0055 v30 = iconst.i64 8 -;; @0055 v31 = uadd_overflow_trap v29, v30, user65535 ; v30 = 8 -;; @0055 v32 = iconst.i64 8 -;; @0055 v33 = uadd_overflow_trap v31, v32, user65535 ; v32 = 8 -;; @0055 v34 = icmp ult v33, v28 -;; @0055 brif v34, block11, block10 +;; @0055 v24 = iadd.i64 v16, v20 +;; @0055 v25 = load.i64 notrap aligned v24 +;; v66 = iconst.i64 1 +;; @0055 v26 = iadd v25, v66 ; v66 = 1 +;; @0055 v28 = load.i64 notrap aligned readonly v0+40 +;; @0055 v29 = load.i64 notrap aligned readonly v0+48 +;; @0055 v30 = uextend.i64 v2 +;; @0055 v31 = iconst.i64 8 +;; @0055 v32 = uadd_overflow_trap v30, 
v31, user65535 ; v31 = 8 +;; @0055 v33 = iconst.i64 8 +;; @0055 v34 = uadd_overflow_trap v32, v33, user65535 ; v33 = 8 +;; @0055 v35 = icmp ult v34, v29 +;; @0055 brif v35, block11, block10 ;; ;; block10 cold: ;; @0055 trap user65535 ;; ;; block11: -;; @0055 v35 = iadd.i64 v27, v31 -;; @0055 store.i64 notrap aligned v25, v35 +;; @0055 v36 = iadd.i64 v28, v32 +;; @0055 store.i64 notrap aligned v26, v36 ;; @0055 jump block3 ;; ;; block3: -;; @0055 store.i32 table_oob aligned table v2, v11 -;; v66 = iconst.i32 0 -;; @0055 v36 = icmp.i32 eq v12, v66 ; v66 = 0 -;; @0055 brif v36, block7, block4 +;; @0055 store.i32 table_oob aligned table v2, v12 +;; v67 = iconst.i32 0 +;; @0055 v37 = icmp.i32 eq v13, v67 ; v67 = 0 +;; @0055 brif v37, block7, block4 ;; ;; block4: -;; @0055 v38 = load.i64 notrap aligned readonly v0+40 -;; @0055 v39 = load.i64 notrap aligned readonly v0+48 -;; @0055 v40 = uextend.i64 v12 -;; @0055 v41 = iconst.i64 8 -;; @0055 v42 = uadd_overflow_trap v40, v41, user65535 ; v41 = 8 -;; @0055 v43 = iconst.i64 8 -;; @0055 v44 = uadd_overflow_trap v42, v43, user65535 ; v43 = 8 -;; @0055 v45 = icmp ult v44, v39 -;; @0055 brif v45, block13, block12 +;; @0055 v39 = load.i64 notrap aligned readonly v0+40 +;; @0055 v40 = load.i64 notrap aligned readonly v0+48 +;; @0055 v41 = uextend.i64 v13 +;; @0055 v42 = iconst.i64 8 +;; @0055 v43 = uadd_overflow_trap v41, v42, user65535 ; v42 = 8 +;; @0055 v44 = iconst.i64 8 +;; @0055 v45 = uadd_overflow_trap v43, v44, user65535 ; v44 = 8 +;; @0055 v46 = icmp ult v45, v40 +;; @0055 brif v46, block13, block12 ;; ;; block12 cold: ;; @0055 trap user65535 ;; ;; block13: -;; @0055 v46 = iadd.i64 v38, v42 -;; @0055 v47 = load.i64 notrap aligned v46 -;; v67 = iconst.i64 -1 -;; @0055 v48 = iadd v47, v67 ; v67 = -1 -;; v68 = iconst.i64 0 -;; @0055 v49 = icmp eq v48, v68 ; v68 = 0 -;; @0055 brif v49, block5, block6 +;; @0055 v47 = iadd.i64 v39, v43 +;; @0055 v48 = load.i64 notrap aligned v47 +;; v68 = iconst.i64 -1 +;; @0055 v49 = iadd 
v48, v68 ; v68 = -1 +;; v69 = iconst.i64 0 +;; @0055 v50 = icmp eq v49, v69 ; v69 = 0 +;; @0055 brif v50, block5, block6 ;; ;; block5 cold: -;; @0055 call fn0(v0, v12) +;; @0055 call fn0(v0, v13) ;; @0055 jump block7 ;; ;; block6: -;; @0055 v52 = load.i64 notrap aligned readonly v0+40 -;; @0055 v53 = load.i64 notrap aligned readonly v0+48 -;; @0055 v54 = uextend.i64 v12 -;; @0055 v55 = iconst.i64 8 -;; @0055 v56 = uadd_overflow_trap v54, v55, user65535 ; v55 = 8 -;; @0055 v57 = iconst.i64 8 -;; @0055 v58 = uadd_overflow_trap v56, v57, user65535 ; v57 = 8 -;; @0055 v59 = icmp ult v58, v53 -;; @0055 brif v59, block15, block14 +;; @0055 v53 = load.i64 notrap aligned readonly v0+40 +;; @0055 v54 = load.i64 notrap aligned readonly v0+48 +;; @0055 v55 = uextend.i64 v13 +;; @0055 v56 = iconst.i64 8 +;; @0055 v57 = uadd_overflow_trap v55, v56, user65535 ; v56 = 8 +;; @0055 v58 = iconst.i64 8 +;; @0055 v59 = uadd_overflow_trap v57, v58, user65535 ; v58 = 8 +;; @0055 v60 = icmp ult v59, v54 +;; @0055 brif v60, block15, block14 ;; ;; block14 cold: ;; @0055 trap user65535 ;; ;; block15: -;; @0055 v60 = iadd.i64 v52, v56 -;; @0055 store.i64 notrap aligned v48, v60 +;; @0055 v61 = iadd.i64 v53, v57 +;; @0055 store.i64 notrap aligned v49, v61 ;; @0055 jump block7 ;; ;; block7: @@ -145,113 +146,114 @@ ;; gv2 = load.i64 notrap aligned gv1 ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned gv3+88 -;; gv5 = load.i32 notrap aligned gv3+96 +;; gv5 = load.i64 notrap aligned gv3+96 ;; sig0 = (i64 vmctx, i32 uext) system_v ;; fn0 = colocated u1:25 sig0 ;; stack_limit = gv2 ;; ;; block0(v0: i64, v1: i64, v2: i32, v3: i32): -;; @005e v4 = load.i32 notrap aligned v0+96 -;; @005e v5 = icmp uge v2, v4 -;; @005e v6 = uextend.i64 v2 -;; @005e v7 = load.i64 notrap aligned v0+88 -;; v63 = iconst.i64 2 -;; @005e v8 = ishl v6, v63 ; v63 = 2 -;; @005e v9 = iadd v7, v8 -;; @005e v10 = iconst.i64 0 -;; @005e v11 = select_spectre_guard v5, v10, v9 ; v10 = 0 -;; @005e v12 = load.i32 table_oob aligned table 
v11 -;; v64 = iconst.i32 0 -;; @005e v13 = icmp eq v3, v64 ; v64 = 0 -;; @005e brif v13, block3, block2 +;; @005e v4 = load.i64 notrap aligned v0+96 +;; @005e v5 = ireduce.i32 v4 +;; @005e v6 = icmp uge v2, v5 +;; @005e v7 = uextend.i64 v2 +;; @005e v8 = load.i64 notrap aligned v0+88 +;; v64 = iconst.i64 2 +;; @005e v9 = ishl v7, v64 ; v64 = 2 +;; @005e v10 = iadd v8, v9 +;; @005e v11 = iconst.i64 0 +;; @005e v12 = select_spectre_guard v6, v11, v10 ; v11 = 0 +;; @005e v13 = load.i32 table_oob aligned table v12 +;; v65 = iconst.i32 0 +;; @005e v14 = icmp eq v3, v65 ; v65 = 0 +;; @005e brif v14, block3, block2 ;; ;; block2: -;; @005e v15 = load.i64 notrap aligned readonly v0+40 -;; @005e v16 = load.i64 notrap aligned readonly v0+48 -;; @005e v17 = uextend.i64 v3 -;; @005e v18 = iconst.i64 8 -;; @005e v19 = uadd_overflow_trap v17, v18, user65535 ; v18 = 8 -;; @005e v20 = iconst.i64 8 -;; @005e v21 = uadd_overflow_trap v19, v20, user65535 ; v20 = 8 -;; @005e v22 = icmp ult v21, v16 -;; @005e brif v22, block9, block8 +;; @005e v16 = load.i64 notrap aligned readonly v0+40 +;; @005e v17 = load.i64 notrap aligned readonly v0+48 +;; @005e v18 = uextend.i64 v3 +;; @005e v19 = iconst.i64 8 +;; @005e v20 = uadd_overflow_trap v18, v19, user65535 ; v19 = 8 +;; @005e v21 = iconst.i64 8 +;; @005e v22 = uadd_overflow_trap v20, v21, user65535 ; v21 = 8 +;; @005e v23 = icmp ult v22, v17 +;; @005e brif v23, block9, block8 ;; ;; block8 cold: ;; @005e trap user65535 ;; ;; block9: -;; @005e v23 = iadd.i64 v15, v19 -;; @005e v24 = load.i64 notrap aligned v23 -;; v65 = iconst.i64 1 -;; @005e v25 = iadd v24, v65 ; v65 = 1 -;; @005e v27 = load.i64 notrap aligned readonly v0+40 -;; @005e v28 = load.i64 notrap aligned readonly v0+48 -;; @005e v29 = uextend.i64 v3 -;; @005e v30 = iconst.i64 8 -;; @005e v31 = uadd_overflow_trap v29, v30, user65535 ; v30 = 8 -;; @005e v32 = iconst.i64 8 -;; @005e v33 = uadd_overflow_trap v31, v32, user65535 ; v32 = 8 -;; @005e v34 = icmp ult v33, v28 -;; @005e 
brif v34, block11, block10 +;; @005e v24 = iadd.i64 v16, v20 +;; @005e v25 = load.i64 notrap aligned v24 +;; v66 = iconst.i64 1 +;; @005e v26 = iadd v25, v66 ; v66 = 1 +;; @005e v28 = load.i64 notrap aligned readonly v0+40 +;; @005e v29 = load.i64 notrap aligned readonly v0+48 +;; @005e v30 = uextend.i64 v3 +;; @005e v31 = iconst.i64 8 +;; @005e v32 = uadd_overflow_trap v30, v31, user65535 ; v31 = 8 +;; @005e v33 = iconst.i64 8 +;; @005e v34 = uadd_overflow_trap v32, v33, user65535 ; v33 = 8 +;; @005e v35 = icmp ult v34, v29 +;; @005e brif v35, block11, block10 ;; ;; block10 cold: ;; @005e trap user65535 ;; ;; block11: -;; @005e v35 = iadd.i64 v27, v31 -;; @005e store.i64 notrap aligned v25, v35 +;; @005e v36 = iadd.i64 v28, v32 +;; @005e store.i64 notrap aligned v26, v36 ;; @005e jump block3 ;; ;; block3: -;; @005e store.i32 table_oob aligned table v3, v11 -;; v66 = iconst.i32 0 -;; @005e v36 = icmp.i32 eq v12, v66 ; v66 = 0 -;; @005e brif v36, block7, block4 +;; @005e store.i32 table_oob aligned table v3, v12 +;; v67 = iconst.i32 0 +;; @005e v37 = icmp.i32 eq v13, v67 ; v67 = 0 +;; @005e brif v37, block7, block4 ;; ;; block4: -;; @005e v38 = load.i64 notrap aligned readonly v0+40 -;; @005e v39 = load.i64 notrap aligned readonly v0+48 -;; @005e v40 = uextend.i64 v12 -;; @005e v41 = iconst.i64 8 -;; @005e v42 = uadd_overflow_trap v40, v41, user65535 ; v41 = 8 -;; @005e v43 = iconst.i64 8 -;; @005e v44 = uadd_overflow_trap v42, v43, user65535 ; v43 = 8 -;; @005e v45 = icmp ult v44, v39 -;; @005e brif v45, block13, block12 +;; @005e v39 = load.i64 notrap aligned readonly v0+40 +;; @005e v40 = load.i64 notrap aligned readonly v0+48 +;; @005e v41 = uextend.i64 v13 +;; @005e v42 = iconst.i64 8 +;; @005e v43 = uadd_overflow_trap v41, v42, user65535 ; v42 = 8 +;; @005e v44 = iconst.i64 8 +;; @005e v45 = uadd_overflow_trap v43, v44, user65535 ; v44 = 8 +;; @005e v46 = icmp ult v45, v40 +;; @005e brif v46, block13, block12 ;; ;; block12 cold: ;; @005e trap user65535 ;; ;; 
block13: -;; @005e v46 = iadd.i64 v38, v42 -;; @005e v47 = load.i64 notrap aligned v46 -;; v67 = iconst.i64 -1 -;; @005e v48 = iadd v47, v67 ; v67 = -1 -;; v68 = iconst.i64 0 -;; @005e v49 = icmp eq v48, v68 ; v68 = 0 -;; @005e brif v49, block5, block6 +;; @005e v47 = iadd.i64 v39, v43 +;; @005e v48 = load.i64 notrap aligned v47 +;; v68 = iconst.i64 -1 +;; @005e v49 = iadd v48, v68 ; v68 = -1 +;; v69 = iconst.i64 0 +;; @005e v50 = icmp eq v49, v69 ; v69 = 0 +;; @005e brif v50, block5, block6 ;; ;; block5 cold: -;; @005e call fn0(v0, v12) +;; @005e call fn0(v0, v13) ;; @005e jump block7 ;; ;; block6: -;; @005e v52 = load.i64 notrap aligned readonly v0+40 -;; @005e v53 = load.i64 notrap aligned readonly v0+48 -;; @005e v54 = uextend.i64 v12 -;; @005e v55 = iconst.i64 8 -;; @005e v56 = uadd_overflow_trap v54, v55, user65535 ; v55 = 8 -;; @005e v57 = iconst.i64 8 -;; @005e v58 = uadd_overflow_trap v56, v57, user65535 ; v57 = 8 -;; @005e v59 = icmp ult v58, v53 -;; @005e brif v59, block15, block14 +;; @005e v53 = load.i64 notrap aligned readonly v0+40 +;; @005e v54 = load.i64 notrap aligned readonly v0+48 +;; @005e v55 = uextend.i64 v13 +;; @005e v56 = iconst.i64 8 +;; @005e v57 = uadd_overflow_trap v55, v56, user65535 ; v56 = 8 +;; @005e v58 = iconst.i64 8 +;; @005e v59 = uadd_overflow_trap v57, v58, user65535 ; v58 = 8 +;; @005e v60 = icmp ult v59, v54 +;; @005e brif v60, block15, block14 ;; ;; block14 cold: ;; @005e trap user65535 ;; ;; block15: -;; @005e v60 = iadd.i64 v52, v56 -;; @005e store.i64 notrap aligned v48, v60 +;; @005e v61 = iadd.i64 v53, v57 +;; @005e store.i64 notrap aligned v49, v61 ;; @005e jump block7 ;; ;; block7: diff --git a/tests/disas/typed-funcrefs.wat b/tests/disas/typed-funcrefs.wat index da55d039dfd7..9c5fb0fd5dfa 100644 --- a/tests/disas/typed-funcrefs.wat +++ b/tests/disas/typed-funcrefs.wat @@ -132,52 +132,52 @@ ;; gv2 = load.i64 notrap aligned gv1 ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned readonly gv3+88 -;; sig0 = (i64 vmctx, i32 
uext, i32 uext) -> i64 system_v +;; sig0 = (i64 vmctx, i32 uext, i64) -> i64 system_v ;; sig1 = (i64 vmctx, i64, i32, i32, i32, i32) -> i32 tail ;; fn0 = colocated u1:9 sig0 ;; stack_limit = gv2 ;; ;; block0(v0: i64, v1: i64, v2: i32, v3: i32, v4: i32, v5: i32): ;; @0048 v12 = load.i64 notrap aligned readonly v0+88 -;; v66 = iconst.i64 8 -;; @0048 v14 = iadd v12, v66 ; v66 = 8 +;; v68 = iconst.i64 8 +;; @0048 v14 = iadd v12, v68 ; v68 = 8 ;; @0048 v17 = load.i64 table_oob aligned table v14 -;; v54 = iconst.i64 -2 -;; @0048 v18 = band v17, v54 ; v54 = -2 +;; v56 = iconst.i64 -2 +;; @0048 v18 = band v17, v56 ; v56 = -2 ;; @0048 brif v17, block3(v18), block2 ;; ;; block2 cold: ;; @003c v7 = iconst.i32 0 -;; @0046 v8 = iconst.i32 1 -;; @0048 v22 = call fn0(v0, v7, v8) ; v7 = 0, v8 = 1 -;; @0048 jump block3(v22) +;; v67 = iconst.i64 1 +;; @0048 v23 = call fn0(v0, v7, v67) ; v7 = 0, v67 = 1 +;; @0048 jump block3(v23) ;; ;; block3(v19: i64): -;; @004a v23 = load.i64 null_reference aligned readonly v19+8 -;; @004a v24 = load.i64 notrap aligned readonly v19+24 -;; @004a v25 = call_indirect sig1, v23(v24, v0, v2, v3, v4, v5) -;; v74 = iconst.i64 16 -;; @005b v38 = iadd.i64 v12, v74 ; v74 = 16 -;; @005b v41 = load.i64 table_oob aligned table v38 -;; v75 = iconst.i64 -2 -;; v76 = band v41, v75 ; v75 = -2 -;; @005b brif v41, block5(v76), block4 +;; @004a v24 = load.i64 null_reference aligned readonly v19+8 +;; @004a v25 = load.i64 notrap aligned readonly v19+24 +;; @004a v26 = call_indirect sig1, v24(v25, v0, v2, v3, v4, v5) +;; v76 = iconst.i64 16 +;; @005b v39 = iadd.i64 v12, v76 ; v76 = 16 +;; @005b v42 = load.i64 table_oob aligned table v39 +;; v77 = iconst.i64 -2 +;; v78 = band v42, v77 ; v77 = -2 +;; @005b brif v42, block5(v78), block4 ;; ;; block4 cold: -;; v77 = iconst.i32 0 -;; @0059 v32 = iconst.i32 2 -;; @005b v46 = call fn0(v0, v77, v32) ; v77 = 0, v32 = 2 -;; @005b jump block5(v46) +;; v79 = iconst.i32 0 +;; v75 = iconst.i64 2 +;; @005b v48 = call fn0(v0, v79, v75) 
; v79 = 0, v75 = 2 +;; @005b jump block5(v48) ;; -;; block5(v43: i64): -;; @005d v47 = load.i64 null_reference aligned readonly v43+8 -;; @005d v48 = load.i64 notrap aligned readonly v43+24 -;; @005d v49 = call_indirect sig1, v47(v48, v0, v2, v3, v4, v5) +;; block5(v44: i64): +;; @005d v49 = load.i64 null_reference aligned readonly v44+8 +;; @005d v50 = load.i64 notrap aligned readonly v44+24 +;; @005d v51 = call_indirect sig1, v49(v50, v0, v2, v3, v4, v5) ;; @0066 jump block1 ;; ;; block1: -;; @0061 v51 = iadd.i32 v49, v25 -;; @0066 return v51 +;; @0061 v53 = iadd.i32 v51, v26 +;; @0066 return v53 ;; } ;; ;; function u0:2(i64 vmctx, i64, i32, i32, i32, i32) -> i32 tail { @@ -187,51 +187,51 @@ ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned readonly gv3+88 ;; sig0 = (i64 vmctx, i64, i32, i32, i32, i32) -> i32 tail -;; sig1 = (i64 vmctx, i32 uext, i32 uext) -> i64 system_v +;; sig1 = (i64 vmctx, i32 uext, i64) -> i64 system_v ;; fn0 = colocated u1:9 sig1 ;; stack_limit = gv2 ;; ;; block0(v0: i64, v1: i64, v2: i32, v3: i32, v4: i32, v5: i32): ;; @0075 v12 = load.i64 notrap aligned readonly v0+88 -;; v66 = iconst.i64 8 -;; @0075 v14 = iadd v12, v66 ; v66 = 8 +;; v68 = iconst.i64 8 +;; @0075 v14 = iadd v12, v68 ; v68 = 8 ;; @0075 v17 = load.i64 table_oob aligned table v14 -;; v54 = iconst.i64 -2 -;; @0075 v18 = band v17, v54 ; v54 = -2 +;; v56 = iconst.i64 -2 +;; @0075 v18 = band v17, v56 ; v56 = -2 ;; @0075 brif v17, block3(v18), block2 ;; ;; block2 cold: ;; @0069 v7 = iconst.i32 0 -;; @0073 v8 = iconst.i32 1 -;; @0075 v22 = call fn0(v0, v7, v8) ; v7 = 0, v8 = 1 -;; @0075 jump block3(v22) +;; v67 = iconst.i64 1 +;; @0075 v23 = call fn0(v0, v7, v67) ; v7 = 0, v67 = 1 +;; @0075 jump block3(v23) ;; ;; block3(v19: i64): -;; @0075 v23 = load.i64 icall_null aligned readonly v19+8 -;; @0075 v24 = load.i64 notrap aligned readonly v19+24 -;; @0075 v25 = call_indirect sig0, v23(v24, v0, v2, v3, v4, v5) -;; v74 = iconst.i64 16 -;; @0087 v38 = iadd.i64 v12, v74 ; v74 = 16 -;; 
@0087 v41 = load.i64 table_oob aligned table v38 -;; v75 = iconst.i64 -2 -;; v76 = band v41, v75 ; v75 = -2 -;; @0087 brif v41, block5(v76), block4 +;; @0075 v24 = load.i64 icall_null aligned readonly v19+8 +;; @0075 v25 = load.i64 notrap aligned readonly v19+24 +;; @0075 v26 = call_indirect sig0, v24(v25, v0, v2, v3, v4, v5) +;; v76 = iconst.i64 16 +;; @0087 v39 = iadd.i64 v12, v76 ; v76 = 16 +;; @0087 v42 = load.i64 table_oob aligned table v39 +;; v77 = iconst.i64 -2 +;; v78 = band v42, v77 ; v77 = -2 +;; @0087 brif v42, block5(v78), block4 ;; ;; block4 cold: -;; v77 = iconst.i32 0 -;; @0085 v32 = iconst.i32 2 -;; @0087 v46 = call fn0(v0, v77, v32) ; v77 = 0, v32 = 2 -;; @0087 jump block5(v46) +;; v79 = iconst.i32 0 +;; v75 = iconst.i64 2 +;; @0087 v48 = call fn0(v0, v79, v75) ; v79 = 0, v75 = 2 +;; @0087 jump block5(v48) ;; -;; block5(v43: i64): -;; @0087 v47 = load.i64 icall_null aligned readonly v43+8 -;; @0087 v48 = load.i64 notrap aligned readonly v43+24 -;; @0087 v49 = call_indirect sig0, v47(v48, v0, v2, v3, v4, v5) +;; block5(v44: i64): +;; @0087 v49 = load.i64 icall_null aligned readonly v44+8 +;; @0087 v50 = load.i64 notrap aligned readonly v44+24 +;; @0087 v51 = call_indirect sig0, v49(v50, v0, v2, v3, v4, v5) ;; @0091 jump block1 ;; ;; block1: -;; @008c v51 = iadd.i32 v49, v25 -;; @0091 return v51 +;; @008c v53 = iadd.i32 v51, v26 +;; @0091 return v53 ;; } ;; ;; function u0:3(i64 vmctx, i64, i32, i32, i32, i32) -> i32 tail { diff --git a/tests/disas/winch/x64/call_indirect/call_indirect.wat b/tests/disas/winch/x64/call_indirect/call_indirect.wat index 56dd4208cd1d..b5e7b9919d6a 100644 --- a/tests/disas/winch/x64/call_indirect/call_indirect.wat +++ b/tests/disas/winch/x64/call_indirect/call_indirect.wat @@ -37,7 +37,7 @@ ;; movq (%r11), %r11 ;; addq $0x30, %r11 ;; cmpq %rsp, %r11 -;; ja 0x1da +;; ja 0x1de ;; 1b: movq %rdi, %r14 ;; subq $0x20, %rsp ;; movq %rdi, 0x18(%rsp) @@ -50,17 +50,17 @@ ;; testl %eax, %eax ;; je 0x52 ;; 48: movl $1, %eax -;; jmp 
0x1d4 +;; jmp 0x1d8 ;; 52: movl 0xc(%rsp), %eax ;; subl $2, %eax ;; subq $4, %rsp ;; movl %eax, (%rsp) ;; movl $0, %ecx ;; movq %r14, %rdx -;; movl 0x60(%rdx), %ebx -;; cmpl %ebx, %ecx -;; jae 0x1dc -;; 73: movl %ecx, %r11d +;; movq 0x60(%rdx), %rbx +;; cmpq %rbx, %rcx +;; jae 0x1e0 +;; 75: movq %rcx, %r11 ;; imulq $8, %r11, %r11 ;; movq 0x58(%rdx), %rdx ;; movq %rdx, %rsi @@ -69,27 +69,27 @@ ;; cmovaeq %rsi, %rdx ;; movq (%rdx), %rax ;; testq %rax, %rax -;; jne 0xc4 -;; 96: subq $4, %rsp +;; jne 0xc6 +;; 98: subq $4, %rsp ;; movl %ecx, (%rsp) ;; subq $8, %rsp ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 8(%rsp), %edx -;; callq 0x2ef +;; callq 0x2f3 ;; addq $8, %rsp ;; addq $4, %rsp ;; movq 0x1c(%rsp), %r14 -;; jmp 0xc8 -;; c4: andq $0xfffffffffffffffe, %rax +;; jmp 0xca +;; c6: andq $0xfffffffffffffffe, %rax ;; testq %rax, %rax -;; je 0x1de -;; d1: movq 0x50(%r14), %r11 +;; je 0x1e2 +;; d3: movq 0x50(%r14), %r11 ;; movl (%r11), %ecx ;; movl 0x10(%rax), %edx ;; cmpl %edx, %ecx -;; jne 0x1e0 -;; e3: pushq %rax +;; jne 0x1e4 +;; e5: pushq %rax ;; popq %rcx ;; movq 0x18(%rcx), %r8 ;; movq 8(%rcx), %rbx @@ -109,10 +109,10 @@ ;; movl %ecx, (%rsp) ;; movl $0, %ecx ;; movq %r14, %rdx -;; movl 0x60(%rdx), %ebx -;; cmpl %ebx, %ecx -;; jae 0x1e2 -;; 132: movl %ecx, %r11d +;; movq 0x60(%rdx), %rbx +;; cmpq %rbx, %rcx +;; jae 0x1e6 +;; 136: movq %rcx, %r11 ;; imulq $8, %r11, %r11 ;; movq 0x58(%rdx), %rdx ;; movq %rdx, %rsi @@ -121,27 +121,27 @@ ;; cmovaeq %rsi, %rdx ;; movq (%rdx), %rax ;; testq %rax, %rax -;; jne 0x183 -;; 155: subq $4, %rsp +;; jne 0x187 +;; 159: subq $4, %rsp ;; movl %ecx, (%rsp) ;; subq $4, %rsp ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 4(%rsp), %edx -;; callq 0x2ef +;; callq 0x2f3 ;; addq $4, %rsp ;; addq $4, %rsp ;; movq 0x20(%rsp), %r14 -;; jmp 0x187 -;; 183: andq $0xfffffffffffffffe, %rax +;; jmp 0x18b +;; 187: andq $0xfffffffffffffffe, %rax ;; testq %rax, %rax -;; je 0x1e4 -;; 190: movq 0x50(%r14), %r11 +;; je 0x1e8 +;; 194: movq 0x50(%r14), %r11 ;; 
movl (%r11), %ecx ;; movl 0x10(%rax), %edx ;; cmpl %edx, %ecx -;; jne 0x1e6 -;; 1a2: pushq %rax +;; jne 0x1ea +;; 1a6: pushq %rax ;; popq %rcx ;; movq 0x18(%rcx), %r8 ;; movq 8(%rcx), %rbx @@ -160,10 +160,10 @@ ;; addq $0x20, %rsp ;; popq %rbp ;; retq -;; 1da: ud2 -;; 1dc: ud2 ;; 1de: ud2 ;; 1e0: ud2 ;; 1e2: ud2 ;; 1e4: ud2 ;; 1e6: ud2 +;; 1e8: ud2 +;; 1ea: ud2 diff --git a/tests/disas/winch/x64/call_indirect/local_arg.wat b/tests/disas/winch/x64/call_indirect/local_arg.wat index c90cd6dd26d2..80e9c6b6ffc6 100644 --- a/tests/disas/winch/x64/call_indirect/local_arg.wat +++ b/tests/disas/winch/x64/call_indirect/local_arg.wat @@ -42,7 +42,7 @@ ;; movq (%r11), %r11 ;; addq $0x30, %r11 ;; cmpq %rsp, %r11 -;; ja 0x130 +;; ja 0x132 ;; 5b: movq %rdi, %r14 ;; subq $0x20, %rsp ;; movq %rdi, 0x18(%rsp) @@ -53,10 +53,10 @@ ;; movl %r11d, (%rsp) ;; movl $0, %ecx ;; movq %r14, %rdx -;; movl 0x60(%rdx), %ebx -;; cmpl %ebx, %ecx -;; jae 0x132 -;; 95: movl %ecx, %r11d +;; movq 0x60(%rdx), %rbx +;; cmpq %rbx, %rcx +;; jae 0x134 +;; 97: movq %rcx, %r11 ;; imulq $8, %r11, %r11 ;; movq 0x58(%rdx), %rdx ;; movq %rdx, %rsi @@ -65,27 +65,27 @@ ;; cmovaeq %rsi, %rdx ;; movq (%rdx), %rax ;; testq %rax, %rax -;; jne 0xe6 -;; b8: subq $4, %rsp +;; jne 0xe8 +;; ba: subq $4, %rsp ;; movl %ecx, (%rsp) ;; subq $8, %rsp ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 8(%rsp), %edx -;; callq 0x30d +;; callq 0x30f ;; addq $8, %rsp ;; addq $4, %rsp ;; movq 0x1c(%rsp), %r14 -;; jmp 0xea -;; e6: andq $0xfffffffffffffffe, %rax +;; jmp 0xec +;; e8: andq $0xfffffffffffffffe, %rax ;; testq %rax, %rax -;; je 0x134 -;; f3: movq 0x50(%r14), %r11 +;; je 0x136 +;; f5: movq 0x50(%r14), %r11 ;; movl (%r11), %ecx ;; movl 0x10(%rax), %edx ;; cmpl %edx, %ecx -;; jne 0x136 -;; 105: movq 0x18(%rax), %rbx +;; jne 0x138 +;; 107: movq 0x18(%rax), %rbx ;; movq 8(%rax), %rcx ;; subq $0xc, %rsp ;; movq %rbx, %rdi @@ -98,7 +98,7 @@ ;; addq $0x20, %rsp ;; popq %rbp ;; retq -;; 130: ud2 ;; 132: ud2 ;; 134: ud2 ;; 136: ud2 +;; 138: 
ud2 diff --git a/tests/disas/winch/x64/table/fill.wat b/tests/disas/winch/x64/table/fill.wat index 65ccb3e59b89..43c45444ef6f 100644 --- a/tests/disas/winch/x64/table/fill.wat +++ b/tests/disas/winch/x64/table/fill.wat @@ -78,7 +78,7 @@ ;; movq (%r11), %r11 ;; addq $0x40, %r11 ;; cmpq %rsp, %r11 -;; ja 0x1d0 +;; ja 0x1d2 ;; db: movq %rdi, %r14 ;; subq $0x30, %rsp ;; movq %rdi, 0x28(%rsp) @@ -94,10 +94,10 @@ ;; movl (%rsp), %ecx ;; addq $4, %rsp ;; movq %r14, %rdx -;; movl 0x60(%rdx), %ebx -;; cmpl %ebx, %ecx -;; jae 0x1d2 -;; 12c: movl %ecx, %r11d +;; movq 0x60(%rdx), %rbx +;; cmpq %rbx, %rcx +;; jae 0x1d4 +;; 12e: movq %rcx, %r11 ;; imulq $8, %r11, %r11 ;; movq 0x58(%rdx), %rdx ;; movq %rdx, %rsi @@ -106,19 +106,19 @@ ;; cmovaeq %rsi, %rdx ;; movq (%rdx), %rax ;; testq %rax, %rax -;; jne 0x17d -;; 14f: subq $4, %rsp +;; jne 0x17f +;; 151: subq $4, %rsp ;; movl %ecx, (%rsp) ;; subq $0xc, %rsp ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 0xc(%rsp), %edx -;; callq 0x4c1 +;; callq 0x4c3 ;; addq $0xc, %rsp ;; addq $4, %rsp ;; movq 0x28(%rsp), %r14 -;; jmp 0x181 -;; 17d: andq $0xfffffffffffffffe, %rax +;; jmp 0x183 +;; 17f: andq $0xfffffffffffffffe, %rax ;; movq %rax, 0xc(%rsp) ;; movl 0x1c(%rsp), %r11d ;; subq $4, %rsp @@ -139,5 +139,5 @@ ;; addq $0x30, %rsp ;; popq %rbp ;; retq -;; 1d0: ud2 ;; 1d2: ud2 +;; 1d4: ud2 diff --git a/tests/disas/winch/x64/table/get.wat b/tests/disas/winch/x64/table/get.wat index 6639318329e4..4facb29c3c50 100644 --- a/tests/disas/winch/x64/table/get.wat +++ b/tests/disas/winch/x64/table/get.wat @@ -34,7 +34,7 @@ ;; movq (%r11), %r11 ;; addq $0x30, %r11 ;; cmpq %rsp, %r11 -;; ja 0xed +;; ja 0xef ;; 5b: movq %rdi, %r14 ;; subq $0x20, %rsp ;; movq %rdi, 0x18(%rsp) @@ -46,10 +46,10 @@ ;; movl (%rsp), %ecx ;; addq $4, %rsp ;; movq %r14, %rdx -;; movl 0x60(%rdx), %ebx -;; cmpl %ebx, %ecx -;; jae 0xef -;; 92: movl %ecx, %r11d +;; movq 0x60(%rdx), %rbx +;; cmpq %rbx, %rcx +;; jae 0xf1 +;; 94: movq %rcx, %r11 ;; imulq $8, %r11, %r11 ;; movq 
0x58(%rdx), %rdx ;; movq %rdx, %rsi @@ -58,21 +58,21 @@ ;; cmovaeq %rsi, %rdx ;; movq (%rdx), %rax ;; testq %rax, %rax -;; jne 0xe3 -;; b5: subq $4, %rsp +;; jne 0xe5 +;; b7: subq $4, %rsp ;; movl %ecx, (%rsp) ;; subq $0xc, %rsp ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 0xc(%rsp), %edx -;; callq 0x2da +;; callq 0x2dc ;; addq $0xc, %rsp ;; addq $4, %rsp ;; movq 0x18(%rsp), %r14 -;; jmp 0xe7 -;; e3: andq $0xfffffffffffffffe, %rax +;; jmp 0xe9 +;; e5: andq $0xfffffffffffffffe, %rax ;; addq $0x20, %rsp ;; popq %rbp ;; retq -;; ed: ud2 ;; ef: ud2 +;; f1: ud2 diff --git a/tests/disas/winch/x64/table/init_copy_drop.wat b/tests/disas/winch/x64/table/init_copy_drop.wat index 25183063e34d..0bffbff0a972 100644 --- a/tests/disas/winch/x64/table/init_copy_drop.wat +++ b/tests/disas/winch/x64/table/init_copy_drop.wat @@ -142,11 +142,11 @@ ;; movl $7, %ecx ;; movl $0, %r8d ;; movl $4, %r9d -;; callq 0x8cd +;; callq 0x8cf ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $1, %esi -;; callq 0x916 +;; callq 0x912 ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $0, %esi @@ -154,11 +154,11 @@ ;; movl $0xf, %ecx ;; movl $1, %r8d ;; movl $3, %r9d -;; callq 0x8cd +;; callq 0x8cf ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $3, %esi -;; callq 0x916 +;; callq 0x912 ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $0, %esi @@ -166,7 +166,7 @@ ;; movl $0x14, %ecx ;; movl $0xf, %r8d ;; movl $5, %r9d -;; callq 0x955 +;; callq 0x951 ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $0, %esi @@ -174,7 +174,7 @@ ;; movl $0x15, %ecx ;; movl $0x1d, %r8d ;; movl $1, %r9d -;; callq 0x955 +;; callq 0x951 ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $0, %esi @@ -182,7 +182,7 @@ ;; movl $0x18, %ecx ;; movl $0xa, %r8d ;; movl $1, %r9d -;; callq 0x955 +;; callq 0x951 ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $0, %esi @@ -190,7 +190,7 @@ ;; movl $0xd, %ecx ;; movl $0xb, %r8d ;; movl $4, %r9d -;; callq 0x955 +;; callq 0x951 ;; movq 8(%rsp), %r14 ;; movq %r14, %rdi ;; movl $0, %esi @@ -198,7 
+198,7 @@ ;; movl $0x13, %ecx ;; movl $0x14, %r8d ;; movl $5, %r9d -;; callq 0x955 +;; callq 0x951 ;; movq 8(%rsp), %r14 ;; addq $0x10, %rsp ;; popq %rbp @@ -212,7 +212,7 @@ ;; movq (%r11), %r11 ;; addq $0x30, %r11 ;; cmpq %rsp, %r11 -;; ja 0x395 +;; ja 0x397 ;; 2cb: movq %rdi, %r14 ;; subq $0x20, %rsp ;; movq %rdi, 0x18(%rsp) @@ -224,10 +224,10 @@ ;; movl (%rsp), %ecx ;; addq $4, %rsp ;; movq %r14, %rdx -;; movl 0xd8(%rdx), %ebx -;; cmpl %ebx, %ecx -;; jae 0x397 -;; 305: movl %ecx, %r11d +;; movq 0xd8(%rdx), %rbx +;; cmpq %rbx, %rcx +;; jae 0x399 +;; 307: movq %rcx, %r11 ;; imulq $8, %r11, %r11 ;; movq 0xd0(%rdx), %rdx ;; movq %rdx, %rsi @@ -236,27 +236,27 @@ ;; cmovaeq %rsi, %rdx ;; movq (%rdx), %rax ;; testq %rax, %rax -;; jne 0x359 -;; 32b: subq $4, %rsp +;; jne 0x35b +;; 32d: subq $4, %rsp ;; movl %ecx, (%rsp) ;; subq $0xc, %rsp ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 0xc(%rsp), %edx -;; callq 0x99e +;; callq 0x994 ;; addq $0xc, %rsp ;; addq $4, %rsp ;; movq 0x18(%rsp), %r14 -;; jmp 0x35d -;; 359: andq $0xfffffffffffffffe, %rax +;; jmp 0x35f +;; 35b: andq $0xfffffffffffffffe, %rax ;; testq %rax, %rax -;; je 0x399 -;; 366: movq 0x50(%r14), %r11 +;; je 0x39b +;; 368: movq 0x50(%r14), %r11 ;; movl (%r11), %ecx ;; movl 0x10(%rax), %edx ;; cmpl %edx, %ecx -;; jne 0x39b -;; 378: pushq %rax +;; jne 0x39d +;; 37a: pushq %rax ;; popq %rcx ;; movq 0x18(%rcx), %rbx ;; movq 8(%rcx), %rdx @@ -267,7 +267,7 @@ ;; addq $0x20, %rsp ;; popq %rbp ;; retq -;; 395: ud2 ;; 397: ud2 ;; 399: ud2 ;; 39b: ud2 +;; 39d: ud2 diff --git a/tests/disas/winch/x64/table/set.wat b/tests/disas/winch/x64/table/set.wat index 9c61a637a365..3f0567fd777c 100644 --- a/tests/disas/winch/x64/table/set.wat +++ b/tests/disas/winch/x64/table/set.wat @@ -39,7 +39,7 @@ ;; movq (%r11), %r11 ;; addq $0x20, %r11 ;; cmpq %rsp, %r11 -;; ja 0xae +;; ja 0xb0 ;; 5b: movq %rdi, %r14 ;; subq $0x20, %rsp ;; movq %rdi, 0x18(%rsp) @@ -49,10 +49,10 @@ ;; movq (%rsp), %rax ;; movl 0xc(%rsp), %ecx ;; movq %r14, %rdx -;; 
movl 0x60(%rdx), %ebx -;; cmpl %ebx, %ecx -;; jae 0xb0 -;; 8a: movl %ecx, %r11d +;; movq 0x60(%rdx), %rbx +;; cmpq %rbx, %rcx +;; jae 0xb2 +;; 8c: movq %rcx, %r11 ;; imulq $8, %r11, %r11 ;; movq 0x58(%rdx), %rdx ;; movq %rdx, %rsi @@ -64,8 +64,8 @@ ;; addq $0x20, %rsp ;; popq %rbp ;; retq -;; ae: ud2 ;; b0: ud2 +;; b2: ud2 ;; ;; wasm[0]::function[2]: ;; pushq %rbp @@ -74,7 +74,7 @@ ;; movq (%r11), %r11 ;; addq $0x30, %r11 ;; cmpq %rsp, %r11 -;; ja 0x1b1 +;; ja 0x1b5 ;; db: movq %rdi, %r14 ;; subq $0x20, %rsp ;; movq %rdi, 0x18(%rsp) @@ -90,10 +90,10 @@ ;; movl (%rsp), %ecx ;; addq $4, %rsp ;; movq %r14, %rdx -;; movl 0x60(%rdx), %ebx -;; cmpl %ebx, %ecx -;; jae 0x1b3 -;; 123: movl %ecx, %r11d +;; movq 0x60(%rdx), %rbx +;; cmpq %rbx, %rcx +;; jae 0x1b7 +;; 125: movq %rcx, %r11 ;; imulq $8, %r11, %r11 ;; movq 0x58(%rdx), %rdx ;; movq %rdx, %rsi @@ -102,26 +102,26 @@ ;; cmovaeq %rsi, %rdx ;; movq (%rdx), %rax ;; testq %rax, %rax -;; jne 0x174 -;; 146: subq $4, %rsp +;; jne 0x176 +;; 148: subq $4, %rsp ;; movl %ecx, (%rsp) ;; subq $8, %rsp ;; movq %r14, %rdi ;; movl $0, %esi ;; movl 8(%rsp), %edx -;; callq 0x496 +;; callq 0x49a ;; addq $8, %rsp ;; addq $4, %rsp ;; movq 0x1c(%rsp), %r14 -;; jmp 0x178 -;; 174: andq $0xfffffffffffffffe, %rax +;; jmp 0x17a +;; 176: andq $0xfffffffffffffffe, %rax ;; movl (%rsp), %ecx ;; addq $4, %rsp ;; movq %r14, %rdx -;; movl 0x60(%rdx), %ebx -;; cmpl %ebx, %ecx -;; jae 0x1b5 -;; 18d: movl %ecx, %r11d +;; movq 0x60(%rdx), %rbx +;; cmpq %rbx, %rcx +;; jae 0x1b9 +;; 191: movq %rcx, %r11 ;; imulq $8, %r11, %r11 ;; movq 0x58(%rdx), %rdx ;; movq %rdx, %rsi @@ -133,6 +133,6 @@ ;; addq $0x20, %rsp ;; popq %rbp ;; retq -;; 1b1: ud2 -;; 1b3: ud2 ;; 1b5: ud2 +;; 1b7: ud2 +;; 1b9: ud2 diff --git a/tests/disas/winch/x64/table/size.wat b/tests/disas/winch/x64/table/size.wat index 212ee6ce1144..989b897196fd 100644 --- a/tests/disas/winch/x64/table/size.wat +++ b/tests/disas/winch/x64/table/size.wat @@ -18,7 +18,7 @@ ;; movq %rdi, 8(%rsp) ;; movq %rsi, 
(%rsp) ;; movq %r14, %r11 -;; movl 0x60(%r11), %eax +;; movq 0x60(%r11), %rax ;; addq $0x10, %rsp ;; popq %rbp ;; retq diff --git a/tests/rlimited-memory.rs b/tests/rlimited-memory.rs index 1fe77f6a521b..3ee15a056106 100644 --- a/tests/rlimited-memory.rs +++ b/tests/rlimited-memory.rs @@ -28,9 +28,9 @@ impl ResourceLimiter for MemoryGrowFailureDetector { } fn table_growing( &mut self, - _current: u32, - _desired: u32, - _maximum: Option, + _current: usize, + _desired: usize, + _maximum: Option, ) -> Result { Ok(true) } diff --git a/tests/wast.rs b/tests/wast.rs index b84f4b8e8e8b..7a2f77c9b46b 100644 --- a/tests/wast.rs +++ b/tests/wast.rs @@ -234,17 +234,6 @@ fn should_fail(test: &Path, strategy: Strategy) -> bool { if part == "memory64" { if [ - // Wasmtime doesn't implement the table64 extension yet. - "call_indirect.wast", - "table_copy.wast", - "table_get.wast", - "table_set.wast", - "table_fill.wast", - "table.wast", - "table_init.wast", - "table_copy_mixed.wast", - "table_grow.wast", - "table_size.wast", // wasmtime doesn't implement exceptions yet "imports.wast", "ref_null.wast", diff --git a/winch/codegen/src/codegen/env.rs b/winch/codegen/src/codegen/env.rs index 772aff519080..baca13fc16a6 100644 --- a/winch/codegen/src/codegen/env.rs +++ b/winch/codegen/src/codegen/env.rs @@ -298,10 +298,9 @@ impl<'a, 'translation, 'data, P: PtrSize> FuncEnv<'a, 'translation, 'data, P> { import_from, current_length_offset, style, - ty: if plan.memory.memory64 { - WasmValType::I64 - } else { - WasmValType::I32 + ty: match plan.memory.idx_type { + wasmtime_environ::IndexType::I32 => WasmValType::I32, + wasmtime_environ::IndexType::I64 => WasmValType::I64, }, min_size, max_size, diff --git a/winch/codegen/src/visitor.rs b/winch/codegen/src/visitor.rs index e313300af127..9f5c256db4dd 100644 --- a/winch/codegen/src/visitor.rs +++ b/winch/codegen/src/visitor.rs @@ -1424,7 +1424,7 @@ where fn visit_table_get(&mut self, table: u32) { let table_index = 
TableIndex::from_u32(table); let plan = self.env.table_plan(table_index); - let heap_type = plan.table.wasm_ty.heap_type; + let heap_type = plan.table.ref_type.heap_type; let style = &plan.style; match heap_type { @@ -1447,7 +1447,7 @@ where fn visit_table_grow(&mut self, table: u32) { let table_index = TableIndex::from_u32(table); let table_plan = self.env.table_plan(table_index); - let builtin = match table_plan.table.wasm_ty.heap_type { + let builtin = match table_plan.table.ref_type.heap_type { WasmHeapType::Func => self.env.builtins.table_grow_func_ref::(), ty => unimplemented!("Support for HeapType: {ty}"), }; @@ -1485,7 +1485,7 @@ where fn visit_table_fill(&mut self, table: u32) { let table_index = TableIndex::from_u32(table); let table_plan = self.env.table_plan(table_index); - let builtin = match table_plan.table.wasm_ty.heap_type { + let builtin = match table_plan.table.ref_type.heap_type { WasmHeapType::Func => self.env.builtins.table_fill_func_ref::(), ty => unimplemented!("Support for heap type: {ty}"), }; @@ -1509,7 +1509,7 @@ where let table_index = TableIndex::from_u32(table); let table_data = self.env.resolve_table_data(table_index); let plan = self.env.table_plan(table_index); - match plan.table.wasm_ty.heap_type { + match plan.table.ref_type.heap_type { WasmHeapType::Func => match plan.style { TableStyle::CallerChecksSignature { lazy_init: true } => { let value = self.context.pop_to_reg(self.masm, None);