// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <functional>
#include <memory>

#include "src/code-stubs.h"
#include "src/debug/interface-types.h"
#include "src/frames-inl.h"
#include "src/objects.h"
#include "src/property-descriptor.h"
#include "src/simulator.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-specialization.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"

using namespace v8::internal;
using namespace v8::internal::wasm;
namespace base = v8::base;

#define TRACE(...)                                      \
  do {                                                  \
    if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
  } while (false)

#define TRACE_CHAIN(instance)        \
  do {                               \
    instance->PrintInstancesChain(); \
  } while (false)

#define TRACE_COMPILE(...)                             \
  do {                                                 \
    if (FLAG_trace_wasm_compiler) PrintF(__VA_ARGS__); \
  } while (false)

namespace {

void* TryAllocateBackingStore(Isolate* isolate, size_t size,
                              bool enable_guard_regions,
                              void*& allocation_base,
                              size_t& allocation_length) {
  // TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
  // systems. It may be safer to fail instead, given that other code might do
  // things that would be unsafe if they expected guard pages where there
  // weren't any.
  if (enable_guard_regions && kGuardRegionsSupported) {
    // TODO(eholk): On Windows we want to make sure we don't commit the guard
    // pages yet.

    // We always allocate the largest possible offset into the heap, so the
    // addressable memory after the guard page can be made inaccessible.
    allocation_length =
        RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
    DCHECK_EQ(0, size % base::OS::CommitPageSize());

    // AllocateGuarded makes the whole region inaccessible by default.
    allocation_base =
        isolate->array_buffer_allocator()->Reserve(allocation_length);
    if (allocation_base == nullptr) {
      return nullptr;
    }

    void* memory = allocation_base;

    // Make the part we care about accessible.
    isolate->array_buffer_allocator()->SetProtection(
        memory, size, v8::ArrayBuffer::Allocator::Protection::kReadWrite);

    reinterpret_cast<v8::Isolate*>(isolate)
        ->AdjustAmountOfExternalAllocatedMemory(size);

    return memory;
  } else {
    void* memory =
        size == 0 ? nullptr
                  : isolate->array_buffer_allocator()->Allocate(size);
    allocation_base = memory;
    allocation_length = size;
    return memory;
  }
}

static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
  DisallowHeapAllocation no_gc;
  JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
  WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
  Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
  // If a link to shared memory instances exists, update the list of memory
  // instances before the instance is destroyed.
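  // The compiled modules of a wasm module's live instances form a chain
  // linked through weak cells: the WasmModuleObject points (weakly) at the
  // current head of the chain, and each WasmCompiledModule reaches its
  // neighbors via weak_prev_instance / weak_next_instance. Roughly:
  //
  //   WasmModuleObject ~~> CM_head <-> CM <-> ... <-> CM_tail
  //
  // The code below unlinks the dying instance's compiled module from this
  // chain and, if it was the head, resets or replaces the module's entry.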
  WasmCompiledModule* compiled_module = owner->compiled_module();
  TRACE("Finalizing %d {\n", compiled_module->instance_id());
  DCHECK(compiled_module->has_weak_wasm_module());
  WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();

  if (trap_handler::UseTrapHandler()) {
    Handle<FixedArray> code_table = compiled_module->code_table();
    for (int i = 0; i < code_table->length(); ++i) {
      Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
      int index = code->trap_handler_index()->value();
      if (index >= 0) {
        trap_handler::ReleaseHandlerData(index);
        code->set_trap_handler_index(Smi::FromInt(-1));
      }
    }
  }

  // Since the order of finalizers is not guaranteed, it can be the case
  // that {instance->compiled_module()->module()}, which is a
  // {Managed<WasmModule>}, has been collected earlier in this GC cycle.
  // Weak references to this instance won't be cleared until the next GC
  // cycle, so we need to manually break some links (such as the weak
  // references from {WasmMemoryObject::instances}).
  if (owner->has_memory_object()) {
    Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
    Handle<WasmInstanceObject> instance(owner, isolate);
    WasmMemoryObject::RemoveInstance(isolate, memory, instance);
  }

  // weak_wasm_module may have been cleared, meaning the module object
  // was GC-ed. In that case, there won't be any new instances created,
  // and we don't need to maintain the links between instances.
  if (!weak_wasm_module->cleared()) {
    WasmModuleObject* wasm_module =
        WasmModuleObject::cast(weak_wasm_module->value());
    WasmCompiledModule* current_template = wasm_module->compiled_module();

    TRACE("chain before {\n");
    TRACE_CHAIN(current_template);
    TRACE("}\n");

    DCHECK(!current_template->has_weak_prev_instance());
    WeakCell* next = compiled_module->maybe_ptr_to_weak_next_instance();
    WeakCell* prev = compiled_module->maybe_ptr_to_weak_prev_instance();

    if (current_template == compiled_module) {
      if (next == nullptr) {
        WasmCompiledModule::Reset(isolate, compiled_module);
      } else {
        WasmCompiledModule* next_compiled_module =
            WasmCompiledModule::cast(next->value());
        WasmModuleObject::cast(wasm_module)
            ->set_compiled_module(next_compiled_module);
        DCHECK_NULL(prev);
        next_compiled_module->reset_weak_prev_instance();
      }
    } else {
      DCHECK(!(prev == nullptr && next == nullptr));
      // The only reason prev or next would be cleared is if the
      // respective objects got collected, but if that happened,
      // we would have relinked the list.
      if (prev != nullptr) {
        DCHECK(!prev->cleared());
        if (next == nullptr) {
          WasmCompiledModule::cast(prev->value())->reset_weak_next_instance();
        } else {
          WasmCompiledModule::cast(prev->value())
              ->set_ptr_to_weak_next_instance(next);
        }
      }
      if (next != nullptr) {
        DCHECK(!next->cleared());
        if (prev == nullptr) {
          WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
        } else {
          WasmCompiledModule::cast(next->value())
              ->set_ptr_to_weak_prev_instance(prev);
        }
      }
    }
    TRACE("chain after {\n");
    TRACE_CHAIN(wasm_module->compiled_module());
    TRACE("}\n");
  }
  compiled_module->reset_weak_owning_instance();
  GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
  TRACE("}\n");
}

int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
                                       int offset) {
  DCHECK(!iterator.done());
  int byte_pos;
  do {
    byte_pos = iterator.source_position().ScriptOffset();
    iterator.Advance();
  } while (!iterator.done() && iterator.code_offset() <= offset);
  return byte_pos;
}

int ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc) {
  DCHECK_EQ(static_cast<int>(kExprCallFunction), static_cast<int>(*pc));
  // Read the LEB128-encoded u32 value (up to 5 bytes starting at pc + 1).
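  // For example, a direct call to function index 300 is encoded as
  // 0x10 0xAC 0x02: the kExprCallFunction opcode followed by the LEB128
  // bytes 0xAC 0x02, which decode to 44 + (2 << 7) = 300.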
  decoder.Reset(pc + 1, pc + 6);
  uint32_t call_idx = decoder.consume_u32v("call index");
  DCHECK(decoder.ok());
  DCHECK_GE(kMaxInt, call_idx);
  return static_cast<int>(call_idx);
}

void RecordLazyCodeStats(Code* code, Counters* counters) {
  counters->wasm_lazily_compiled_functions()->Increment();
  counters->wasm_generated_code_size()->Increment(code->body_size());
  counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}

}  // namespace

// static
const WasmExceptionSig wasm::WasmException::empty_sig_(0, 0, nullptr);

Handle<JSArrayBuffer> wasm::SetupArrayBuffer(
    Isolate* isolate, void* allocation_base, size_t allocation_length,
    void* backing_store, size_t size, bool is_external,
    bool enable_guard_regions, SharedFlag shared) {
  Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer(shared);
  DCHECK_GE(kMaxInt, size);
  if (shared == SharedFlag::kShared) DCHECK(FLAG_experimental_wasm_threads);
  JSArrayBuffer::Setup(buffer, isolate, is_external, allocation_base,
                       allocation_length, backing_store,
                       static_cast<int>(size), shared);
  buffer->set_is_neuterable(false);
  buffer->set_is_wasm_buffer(true);
  buffer->set_has_guard_region(enable_guard_regions);
  return buffer;
}

Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
                                           bool enable_guard_regions,
                                           SharedFlag shared) {
  // Check against kMaxInt, since the byte length is stored as int in the
  // JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the
  // command line, and we don't want to fail a CHECK then.
  if (size > FLAG_wasm_max_mem_pages * WasmModule::kPageSize ||
      size > kMaxInt) {
    // TODO(titzer): lift restriction on maximum memory allocated here.
    return Handle<JSArrayBuffer>::null();
  }

  enable_guard_regions = enable_guard_regions && kGuardRegionsSupported;

  void* allocation_base = nullptr;  // Set by TryAllocateBackingStore.
  size_t allocation_length = 0;     // Set by TryAllocateBackingStore.

  void* memory = TryAllocateBackingStore(isolate, size, enable_guard_regions,
                                         allocation_base, allocation_length);

  if (size > 0 && memory == nullptr) {
    return Handle<JSArrayBuffer>::null();
  }

#if DEBUG
  // Double check the API allocator actually zero-initialized the memory.
  const byte* bytes = reinterpret_cast<const byte*>(memory);
  for (size_t i = 0; i < size; ++i) {
    DCHECK_EQ(0, bytes[i]);
  }
#endif

  constexpr bool is_external = false;
  return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
                          size, is_external, enable_guard_regions, shared);
}

void wasm::UnpackAndRegisterProtectedInstructions(
    Isolate* isolate, Handle<FixedArray> code_table) {
  for (int i = 0; i < code_table->length(); ++i) {
    Handle<Code> code;
    // This is sometimes undefined when we're called from cctests.
    if (!code_table->GetValue<Code>(isolate, i).ToHandle(&code)) {
      continue;
    }

    if (code->kind() != Code::WASM_FUNCTION) {
      continue;
    }

    const intptr_t base = reinterpret_cast<intptr_t>(code->entry());

    Zone zone(isolate->allocator(), "Wasm Module");
    ZoneVector<trap_handler::ProtectedInstructionData> unpacked(&zone);
    const int mode_mask =
        RelocInfo::ModeMask(RelocInfo::WASM_PROTECTED_INSTRUCTION_LANDING);
    for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
      trap_handler::ProtectedInstructionData data;
      data.instr_offset = it.rinfo()->data();
      data.landing_offset =
          reinterpret_cast<intptr_t>(it.rinfo()->pc()) - base;
      unpacked.emplace_back(data);
    }
    if (unpacked.size() > 0) {
      int size = code->CodeSize();
      const int index =
          RegisterHandlerData(reinterpret_cast<void*>(base), size,
                              unpacked.size(), &unpacked[0]);
      // TODO(eholk): if index is negative, fail.
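      // RegisterHandlerData returns the slot under which the protected
      // instruction metadata was registered; it is stored on the code object
      // so that InstanceFinalizer can pass it to ReleaseHandlerData later.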
      DCHECK(index >= 0);
      code->set_trap_handler_index(Smi::FromInt(index));
    }
  }
}

std::ostream& wasm::operator<<(std::ostream& os,
                               const WasmFunctionName& name) {
  os << "#" << name.function_->func_index;
  if (name.function_->name.is_set()) {
    if (name.name_.start()) {
      os << ":";
      os.write(name.name_.start(), name.name_.length());
    }
  } else {
    os << "?";
  }
  return os;
}

WasmInstanceObject* wasm::GetOwningWasmInstance(Code* code) {
  DisallowHeapAllocation no_gc;
  DCHECK(code->kind() == Code::WASM_FUNCTION ||
         code->kind() == Code::WASM_INTERPRETER_ENTRY);
  FixedArray* deopt_data = code->deoptimization_data();
  DCHECK_EQ(code->kind() == Code::WASM_INTERPRETER_ENTRY ? 1 : 2,
            deopt_data->length());
  Object* weak_link = deopt_data->get(0);
  DCHECK(weak_link->IsWeakCell());
  WeakCell* cell = WeakCell::cast(weak_link);
  if (cell->cleared()) return nullptr;
  return WasmInstanceObject::cast(cell->value());
}

WasmModule::WasmModule(std::unique_ptr<Zone> owned)
    : signature_zone(std::move(owned)) {}

WasmFunction* wasm::GetWasmFunctionForImportWrapper(Isolate* isolate,
                                                    Handle<Object> target) {
  if (target->IsJSFunction()) {
    Handle<JSFunction> func = Handle<JSFunction>::cast(target);
    if (func->code()->kind() == Code::JS_TO_WASM_FUNCTION) {
      auto exported = Handle<WasmExportedFunction>::cast(func);
      Handle<WasmInstanceObject> other_instance(exported->instance(), isolate);
      int func_index = exported->function_index();
      return &other_instance->module()->functions[func_index];
    }
  }
  return nullptr;
}

Handle<Code> wasm::UnwrapImportWrapper(Handle<Object> import_wrapper) {
  Handle<JSFunction> func = Handle<JSFunction>::cast(import_wrapper);
  Handle<Code> export_wrapper_code = handle(func->code());
  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
  for (RelocIterator it(*export_wrapper_code, mask);; it.next()) {
    DCHECK(!it.done());
    Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
    if (target->kind() != Code::WASM_FUNCTION &&
        target->kind() != Code::WASM_TO_JS_FUNCTION &&
        target->kind() != Code::WASM_INTERPRETER_ENTRY)
      continue;
// There should only be this one call to wasm code.
#ifdef DEBUG
    for (it.next(); !it.done(); it.next()) {
      Code* code =
          Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
      DCHECK(code->kind() != Code::WASM_FUNCTION &&
             code->kind() != Code::WASM_TO_JS_FUNCTION &&
             code->kind() != Code::WASM_INTERPRETER_ENTRY);
    }
#endif
    return handle(target);
  }
  UNREACHABLE();
}

void wasm::UpdateDispatchTables(Isolate* isolate,
                                Handle<FixedArray> dispatch_tables, int index,
                                WasmFunction* function, Handle<Code> code) {
  // Each entry in dispatch_tables occupies four slots:
  // (instance, table_index, function_table, signature_table).
  DCHECK_EQ(0, dispatch_tables->length() % 4);
  for (int i = 0; i < dispatch_tables->length(); i += 4) {
    int table_index = Smi::ToInt(dispatch_tables->get(i + 1));
    Handle<FixedArray> function_table(
        FixedArray::cast(dispatch_tables->get(i + 2)), isolate);
    Handle<FixedArray> signature_table(
        FixedArray::cast(dispatch_tables->get(i + 3)), isolate);
    if (function) {
      // TODO(titzer): the signature might need to be copied to avoid
      // a dangling pointer in the signature map.
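      // The signature table holds the canonicalized signature index for the
      // entry (obtained via FindOrInsert below), which indirect-call sites
      // compare against; -1 marks an empty slot.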
      Handle<WasmInstanceObject> instance(
          WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
      auto& func_table = instance->module()->function_tables[table_index];
      uint32_t sig_index = func_table.map.FindOrInsert(function->sig);
      signature_table->set(index, Smi::FromInt(static_cast<int>(sig_index)));
      function_table->set(index, *code);
    } else {
      signature_table->set(index, Smi::FromInt(-1));
      function_table->set(index, Smi::kZero);
    }
  }
}

void wasm::TableSet(ErrorThrower* thrower, Isolate* isolate,
                    Handle<WasmTableObject> table, int32_t index,
                    Handle<JSFunction> function) {
  Handle<FixedArray> array(table->functions(), isolate);
  if (index < 0 || index >= array->length()) {
    thrower->RangeError("index out of bounds");
    return;
  }

  Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);

  WasmFunction* wasm_function = nullptr;
  Handle<Code> code = Handle<Code>::null();
  Handle<Object> value = handle(isolate->heap()->null_value());

  if (!function.is_null()) {
    wasm_function = GetWasmFunctionForImportWrapper(isolate, function);
    code = UnwrapImportWrapper(function);
    value = Handle<Object>::cast(function);
  }

  UpdateDispatchTables(isolate, dispatch_tables, index, wasm_function, code);
  array->set(index, *value);
}

Handle