// ladybird/Libraries/LibJS/Bytecode/Interpreter.cpp
/*
* Copyright (c) 2021-2025, Andreas Kling <andreas@ladybird.org>
* Copyright (c) 2025, Aliaksandr Kalenik <kalenik.aliaksandr@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Debug.h>
#include <AK/HashTable.h>
#include <AK/TemporaryChange.h>
#include <LibGC/RootHashMap.h>
#include <LibJS/AST.h>
#include <LibJS/Bytecode/AsmInterpreter/AsmInterpreter.h>
#include <LibJS/Bytecode/BasicBlock.h>
#include <LibJS/Bytecode/FormatOperand.h>
#include <LibJS/Bytecode/Generator.h>
#include <LibJS/Bytecode/Instruction.h>
#include <LibJS/Bytecode/Interpreter.h>
#include <LibJS/Bytecode/Label.h>
#include <LibJS/Bytecode/Op.h>
#include <LibJS/Bytecode/PropertyAccess.h>
#include <LibJS/Export.h>
#include <LibJS/Runtime/AbstractOperations.h>
#include <LibJS/Runtime/Accessor.h>
#include <LibJS/Runtime/Array.h>
#include <LibJS/Runtime/AsyncFromSyncIterator.h>
#include <LibJS/Runtime/AsyncFromSyncIteratorPrototype.h>
#include <LibJS/Runtime/BigInt.h>
#include <LibJS/Runtime/ClassConstruction.h>
#include <LibJS/Runtime/CompletionCell.h>
#include <LibJS/Runtime/DeclarativeEnvironment.h>
#include <LibJS/Runtime/ECMAScriptFunctionObject.h>
#include <LibJS/Runtime/Environment.h>
#include <LibJS/Runtime/FunctionEnvironment.h>
#include <LibJS/Runtime/GeneratorResult.h>
#include <LibJS/Runtime/GlobalEnvironment.h>
#include <LibJS/Runtime/GlobalObject.h>
#include <LibJS/Runtime/Iterator.h>
#include <LibJS/Runtime/MathObject.h>
#include <LibJS/Runtime/ModuleEnvironment.h>
#include <LibJS/Runtime/NativeFunction.h>
#include <LibJS/Runtime/ObjectEnvironment.h>
#include <LibJS/Runtime/Realm.h>
#include <LibJS/Runtime/Reference.h>
#include <LibJS/Runtime/RegExpObject.h>
#include <LibJS/Runtime/TypedArray.h>
#include <LibJS/Runtime/Value.h>
#include <LibJS/Runtime/ValueInlines.h>
#include <LibJS/SourceTextModule.h>
#include <math.h>
namespace JS::Bytecode {
// When true, every compiled executable is disassembled (dump()) right after
// bytecode generation. Toggled externally (e.g. by a command-line flag).
bool g_dump_bytecode = false;
// Loose inequality (!=). Same-tag int32/object/boolean/null/undefined values
// can be decided by comparing the raw encodings; everything else goes through
// the full IsLooselyEqual abstract operation (which may throw, e.g. valueOf).
ALWAYS_INLINE static ThrowCompletionOr<bool> loosely_inequals(VM& vm, Value src1, Value src2)
{
    bool const tags_match = src1.tag() == src2.tag();
    bool const bitwise_comparable = src1.is_int32() || src1.is_object() || src1.is_boolean() || src1.is_nullish();
    if (tags_match && bitwise_comparable)
        return src1.encoded() != src2.encoded();
    auto equal = TRY(is_loosely_equal(vm, src1, src2));
    return !equal;
}
// Loose equality (==). Mirrors loosely_inequals: a bit-for-bit fast path for
// same-tag simple values, otherwise the full (possibly-throwing) algorithm.
ALWAYS_INLINE static ThrowCompletionOr<bool> loosely_equals(VM& vm, Value src1, Value src2)
{
    bool const tags_match = src1.tag() == src2.tag();
    bool const bitwise_comparable = src1.is_int32() || src1.is_object() || src1.is_boolean() || src1.is_nullish();
    if (tags_match && bitwise_comparable)
        return src1.encoded() == src2.encoded();
    auto equal = TRY(is_loosely_equal(vm, src1, src2));
    return equal;
}
// Strict inequality (!==). Never throws; the VM parameter exists only so the
// signature matches the throwing comparison helpers used by the dispatch macro.
ALWAYS_INLINE static ThrowCompletionOr<bool> strict_inequals(VM&, Value src1, Value src2)
{
    bool const tags_match = src1.tag() == src2.tag();
    bool const bitwise_comparable = src1.is_int32() || src1.is_object() || src1.is_boolean() || src1.is_nullish();
    if (tags_match && bitwise_comparable)
        return src1.encoded() != src2.encoded();
    bool const equal = is_strictly_equal(src1, src2);
    return !equal;
}
// Strict equality (===). Never throws; same fast path as strict_inequals.
ALWAYS_INLINE static ThrowCompletionOr<bool> strict_equals(VM&, Value src1, Value src2)
{
    bool const tags_match = src1.tag() == src2.tag();
    bool const bitwise_comparable = src1.is_int32() || src1.is_object() || src1.is_boolean() || src1.is_nullish();
    if (tags_match && bitwise_comparable)
        return src1.encoded() == src2.encoded();
    bool const equal = is_strictly_equal(src1, src2);
    return equal;
}
// Constructor/destructor defaulted out of line in this translation unit.
Interpreter::Interpreter() = default;
Interpreter::~Interpreter() = default;
// Wraps a yielded value and its resume point in a freshly allocated
// GeneratorResult. The continuation address is stored as a double-backed
// Value; js_null() marks "no continuation" (generator is done).
ALWAYS_INLINE Value Interpreter::do_yield(Value value, Optional<Label> continuation)
{
    // FIXME: If we get a pointer, which is not accurately representable as a double
    //        will cause this to explode
    Value continuation_value = js_null();
    if (continuation.has_value())
        continuation_value = Value(continuation->address());
    return vm().heap().allocate<GeneratorResult>(value, continuation_value, false).ptr();
}
// 16.1.6 ScriptEvaluation ( scriptRecord ), https://tc39.es/ecma262/#sec-runtime-semantics-scriptevaluation
//
// Evaluates a parsed Script: performs GlobalDeclarationInstantiation, lazily
// compiles (and caches) the script's bytecode, runs it in a fresh execution
// context carved out of the interpreter stack, and returns the completion
// value (throwing on abrupt completion).
ThrowCompletionOr<Value> Interpreter::run(Script& script_record, GC::Ptr<Environment> lexical_environment_override)
{
    auto& vm = this->vm();
    // 1. Let globalEnv be scriptRecord.[[Realm]].[[GlobalEnv]].
    auto& global_environment = script_record.realm().global_environment();
    // NOTE: Spec steps are rearranged in order to compute number of registers+constants+locals before construction of the execution context.
    // 12. Let result be Completion(GlobalDeclarationInstantiation(script, globalEnv)).
    auto instantiation_result = script_record.global_declaration_instantiation(vm, global_environment);
    Completion result = instantiation_result.is_throw_completion() ? instantiation_result.throw_completion() : normal_completion(js_undefined());
    // 11. Let script be scriptRecord.[[ECMAScriptCode]].
    // Lazily compile on first run; the executable is cached on the Script
    // record and the AST is dropped to reclaim memory.
    GC::Ptr<Executable> executable = script_record.cached_executable();
    if (!executable && result.type() == Completion::Type::Normal) {
        executable = JS::Bytecode::Generator::generate_from_ast_node(vm, *script_record.parse_node(), {});
        if (executable) {
            script_record.cache_executable(*executable);
            script_record.drop_ast();
        }
    }
    if (executable && g_dump_bytecode)
        executable->dump();
    u32 registers_and_locals_count = 0;
    u32 constants_count = 0;
    if (executable) {
        registers_and_locals_count = executable->registers_and_locals_count;
        constants_count = executable->constants.size();
    }
    // 2. Let scriptContext be a new ECMAScript code execution context.
    // The context lives on the interpreter's stack; stack_mark remembers the
    // current top so the ScopeGuard below can roll everything back on exit.
    auto& stack = vm.interpreter_stack();
    auto* stack_mark = stack.top();
    auto* script_context = stack.allocate(registers_and_locals_count, constants_count, 0);
    if (!script_context) [[unlikely]]
        return vm.throw_completion<InternalError>(ErrorType::CallStackSizeExceeded);
    ScopeGuard deallocate_guard = [&stack, stack_mark] { stack.deallocate(stack_mark); };
    // 3. Set the Function of scriptContext to null.
    // NOTE: This was done during execution context construction.
    // 4. Set the Realm of scriptContext to scriptRecord.[[Realm]].
    script_context->realm = &script_record.realm();
    // 5. Set the ScriptOrModule of scriptContext to scriptRecord.
    script_context->script_or_module = GC::Ref<Script>(script_record);
    // 6. Set the VariableEnvironment of scriptContext to globalEnv.
    script_context->variable_environment = &global_environment;
    // 7. Set the LexicalEnvironment of scriptContext to globalEnv.
    script_context->lexical_environment = &global_environment;
    // Non-standard: Override the lexical environment if requested.
    if (lexical_environment_override)
        script_context->lexical_environment = lexical_environment_override;
    // 8. Set the PrivateEnvironment of scriptContext to null.
    // 9. Suspend the currently running execution context.
    // 10. Push scriptContext onto the execution context stack; scriptContext is now the running execution context.
    TRY(vm.push_execution_context(*script_context, {}));
    // 13. If result.[[Type]] is normal, then
    if (executable && result.type() == Completion::Type::Normal) {
        // a. Set result to Completion(Evaluation of script).
        result = run_executable(*script_context, *executable, {}, {});
        // b. If result is a normal completion and result.[[Value]] is empty, then
        if (result.type() == Completion::Type::Normal && result.value().is_special_empty_value()) {
            // i. Set result to NormalCompletion(undefined).
            result = normal_completion(js_undefined());
        }
    }
    // 14. Suspend scriptContext and remove it from the execution context stack.
    vm.pop_execution_context();
    // 15. Assert: The execution context stack is not empty.
    VERIFY(!vm.execution_context_stack().is_empty());
    // FIXME: 16. Resume the context that is now on the top of the execution context stack as the running execution context.
    vm.finish_execution_generation();
    // 17. Return ? result.
    if (result.is_abrupt()) {
        VERIFY(result.type() == Completion::Type::Throw);
        return result.release_error();
    }
    return result.value();
}
// Convenience entry point for running a module: links + evaluates it, then
// drains queued promise jobs and finalization-registry cleanup jobs.
// Always returns undefined on success.
ThrowCompletionOr<Value> Interpreter::run(SourceTextModule& module)
{
    // FIXME: This is not a entry point as defined in the spec, but is convenient.
    //        To avoid work we use link_and_eval_module however that can already be
    //        dangerous if the vm loaded other modules.
    auto& vm = this->vm();
    TRY(vm.link_and_eval_module(Badge<Bytecode::Interpreter> {}, module));
    // Run microtasks queued by module evaluation before returning to the host.
    vm.run_queued_promise_jobs();
    vm.run_queued_finalization_registry_cleanup_jobs();
    return js_undefined();
}
// Finds the exception handler responsible for `exception` thrown at
// `program_counter`. If the current executable has no matching handler and we
// are in an inline frame (pushed by try_inline_call/_construct), the inline
// frame is unwound and the search continues in the caller's executable.
// Returns ContinueInThisExecutable when a handler was found (the running
// context's program_counter now points at it), or ExitFromExecutable when the
// exception must propagate out of run_bytecode entirely.
Interpreter::HandleExceptionResponse Interpreter::handle_exception(u32 program_counter, Value exception)
{
    for (;;) {
        auto handlers = current_executable().exception_handlers_for_offset(program_counter);
        if (handlers.has_value()) {
            // Stash the exception in its dedicated register and jump to the handler.
            reg(Register::exception()) = exception;
            m_running_execution_context->program_counter = handlers->handler_offset;
            return HandleExceptionResponse::ContinueInThisExecutable;
        }
        // If we're in an inline frame, unwind to the caller and try its handlers.
        if (m_running_execution_context->caller_frame) {
            auto* callee_frame = m_running_execution_context;
            auto* caller_frame = callee_frame->caller_frame;
            auto caller_pc = callee_frame->caller_return_pc;
            vm().pop_execution_context();
            vm().interpreter_stack().deallocate(callee_frame);
            m_running_execution_context = caller_frame;
            // NB: caller_pc is the return address (one past the Call instruction).
            // For handler lookup we need a PC inside the Call instruction,
            // since the exception occurred during that call, not after it.
            // Exception handler ranges use an exclusive end offset, so using
            // caller_pc directly would miss a handler ending right at that address.
            program_counter = caller_pc - 1;
            continue;
        }
        // No handler anywhere up the inline-frame chain: leave the exception
        // in the register and tell the caller to bail out of this executable.
        reg(Register::exception()) = exception;
        return HandleExceptionResponse::ExitFromExecutable;
    }
}
// Allocates and initializes a callee ExecutionContext directly on the
// interpreter stack for an inlined bytecode-to-bytecode call, bypassing the
// generic function-call machinery. Returns nullptr if the interpreter stack
// is exhausted, in which case the caller falls back to the slow path.
ExecutionContext* Interpreter::push_inline_frame(
    ECMAScriptFunctionObject& callee_function,
    Executable& callee_executable,
    ReadonlySpan<Operand> arguments,
    u32 return_pc,
    u32 dst_raw,
    Value this_value,
    Object* new_target,
    bool is_construct)
{
    auto& stack = vm().interpreter_stack();
    u32 insn_argument_count = arguments.size();
    size_t registers_and_locals_count = callee_executable.registers_and_locals_count;
    size_t constants_count = callee_executable.constants.size();
    // Reserve at least one slot per declared formal so missing arguments can
    // be padded with undefined below.
    size_t argument_count = max(insn_argument_count, static_cast<u32>(callee_function.formal_parameter_count()));
    auto* callee_context = stack.allocate(registers_and_locals_count, constants_count, argument_count);
    if (!callee_context) [[unlikely]]
        return nullptr;
    // Copy arguments from caller's registers into callee's argument slots.
    auto* callee_argument_values = callee_context->arguments.data();
    for (u32 i = 0; i < insn_argument_count; ++i)
        callee_argument_values[i] = get(arguments[i]);
    for (size_t i = insn_argument_count; i < argument_count; ++i)
        callee_argument_values[i] = js_undefined();
    callee_context->passed_argument_count = insn_argument_count;
    // Set up caller linkage so Return can restore the caller frame.
    callee_context->caller_frame = m_running_execution_context;
    callee_context->caller_executable = m_running_execution_context->executable;
    callee_context->caller_dst_raw = dst_raw;
    callee_context->caller_return_pc = return_pc;
    callee_context->caller_is_construct = is_construct;
    // Inlined PrepareForOrdinaryCall (avoids function call overhead on hot path).
    callee_context->function = &callee_function;
    callee_context->realm = callee_function.realm();
    callee_context->script_or_module = callee_function.m_script_or_module;
    if (callee_function.function_environment_needed()) {
        auto local_environment = new_function_environment(callee_function, new_target);
        local_environment->ensure_capacity(callee_function.shared_data().m_function_environment_bindings_count);
        callee_context->lexical_environment = local_environment;
        callee_context->variable_environment = local_environment;
    } else {
        callee_context->lexical_environment = callee_function.environment();
        callee_context->variable_environment = callee_function.environment();
    }
    callee_context->private_environment = callee_function.m_private_environment;
    // Fast-path push onto execution context stack (avoids Vector::append growth check preventing inlining).
    auto& ec_stack = vm().execution_context_stack();
    if (ec_stack.size() < ec_stack.capacity()) [[likely]]
        ec_stack.unchecked_append(callee_context);
    else
        ec_stack.append(callee_context);
    // Bind this if the function uses it.
    if (callee_function.uses_this())
        callee_function.ordinary_call_bind_this(vm(), *callee_context, this_value);
    // Set up execution context fields that run_executable normally does.
    // NB: We must use the callee's realm (not the caller's) for global_object
    // and global_declarative_environment, since the caller's realm may differ
    // in cross-realm calls (e.g. iframe <-> parent).
    callee_context->executable = callee_executable;
    // Copy constants (memcpy avoids aliasing issues with the scalar loop).
    auto* values = callee_context->registers_and_constants_and_locals_and_arguments();
    if (auto count = callee_executable.constants.size())
        memcpy(values + callee_executable.registers_and_locals_count,
            callee_executable.constants.data(),
            count * sizeof(Value));
    // Set this value register.
    values[Register::this_value().index()] = callee_context->this_value.value_or(js_special_empty_value());
    return callee_context;
}
// Attempts to execute an Op::Call by pushing an inline frame instead of going
// through the generic call machinery. Returns false — leaving all interpreter
// state untouched — when the callee is not a plain bytecode-backed ECMAScript
// function (or the stack is exhausted), so the caller falls back to
// instruction.execute_impl().
NEVER_INLINE bool Interpreter::try_inline_call(Instruction const& insn, u32 current_pc)
{
    auto& instruction = static_cast<Op::Call const&>(insn);
    auto callee = get(instruction.callee());
    if (!callee.is_object())
        return false;
    auto& callee_object = callee.as_object();
    if (!is<ECMAScriptFunctionObject>(callee_object))
        return false;
    auto& callee_function = static_cast<ECMAScriptFunctionObject&>(callee_object);
    // Only ordinary (non-generator, non-async) functions with bytecode are
    // eligible; class constructors need construct-specific handling.
    if (callee_function.kind() != FunctionKind::Normal
        || callee_function.is_class_constructor()
        || !callee_function.bytecode_executable())
        return false;
    // Resume at the instruction after the (variable-length) Call on return.
    u32 return_pc = current_pc + instruction.length();
    auto* callee_context = push_inline_frame(
        callee_function, *callee_function.bytecode_executable(),
        instruction.arguments(), return_pc, instruction.dst().raw(),
        get(instruction.this_value()), nullptr, false);
    if (!callee_context) [[unlikely]]
        return false;
    m_running_execution_context = callee_context;
    return true;
}
// Attempts to execute an Op::CallConstruct via an inline frame (see
// try_inline_call). Only bytecode-backed base-class constructors are
// eligible. Returns false on any ineligibility or failure, in which case the
// caller falls back to instruction.execute_impl().
// NOTE(review): the error paths here (prototype lookup, instance-element
// initialization) drop their throw completion and rely on the generic slow
// path re-running the operation and producing the same exception — confirm
// those operations are repeatable without observable double side effects.
NEVER_INLINE bool Interpreter::try_inline_call_construct(Instruction const& insn, u32 current_pc)
{
    auto& instruction = static_cast<Op::CallConstruct const&>(insn);
    auto callee = get(instruction.callee());
    if (!callee.is_object())
        return false;
    auto& callee_object = callee.as_object();
    if (!is<ECMAScriptFunctionObject>(callee_object))
        return false;
    auto& callee_function = static_cast<ECMAScriptFunctionObject&>(callee_object);
    if (!callee_function.has_constructor()
        || callee_function.constructor_kind() != ConstructorKind::Base
        || !callee_function.bytecode_executable())
        return false;
    // OrdinaryCreateFromConstructor: create the this object.
    auto prototype_or_error = get_prototype_from_constructor(vm(), callee_function, &Intrinsics::object_prototype);
    if (prototype_or_error.is_error()) [[unlikely]]
        return false;
    auto this_argument = Object::create(realm(), prototype_or_error.release_value());
    u32 return_pc = current_pc + instruction.length();
    auto* callee_context = push_inline_frame(
        callee_function, *callee_function.bytecode_executable(),
        instruction.arguments(), return_pc, instruction.dst().raw(),
        this_argument, &callee_function, true);
    if (!callee_context) [[unlikely]]
        return false;
    // Ensure this_value is set for construct return semantics.
    if (!callee_context->this_value.has_value())
        callee_context->this_value = Value(this_argument);
    // InitializeInstanceElements (can throw).
    auto init_result = this_argument->initialize_instance_elements(callee_function);
    if (init_result.is_throw_completion()) [[unlikely]] {
        // Undo the partially-pushed frame before falling back to the slow path.
        vm().pop_execution_context();
        vm().interpreter_stack().deallocate(callee_context);
        return false;
    }
    m_running_execution_context = callee_context;
    return true;
}
// Tears down an inline frame pushed by try_inline_call(_construct): restores
// the caller as the running context, writes the return value into the
// caller's destination register, and resumes at the saved return PC.
NEVER_INLINE void Interpreter::pop_inline_frame(Value return_value)
{
    auto* callee_frame = m_running_execution_context;
    auto* caller_frame = callee_frame->caller_frame;
    auto caller_dst_raw = callee_frame->caller_dst_raw;
    auto caller_pc = callee_frame->caller_return_pc;
    // For base constructor calls, apply construct return semantics.
    // (A non-object return value is replaced with the constructed `this`.)
    if (callee_frame->caller_is_construct && !return_value.is_object())
        return_value = callee_frame->this_value.value();
    vm().pop_execution_context();
    vm().interpreter_stack().deallocate(callee_frame);
    m_running_execution_context = caller_frame;
    caller_frame->program_counter = caller_pc;
    caller_frame->registers_and_constants_and_locals_and_arguments()[caller_dst_raw] = return_value;
    vm().finish_execution_generation();
}
// The main bytecode execution loop (portable C++ variant). Dispatches
// instructions via computed goto; returns when the current executable
// finishes (End/Return), suspends (Await/Yield), or an unhandled exception
// escapes (exception register left non-empty).
void Interpreter::run_bytecode(size_t entry_point)
{
    // Refuse to run if either the interpreter's value stack or the native
    // stack is exhausted; surface this as a pending exception.
    if (vm().interpreter_stack().is_exhausted() || vm().did_reach_stack_space_limit()) [[unlikely]] {
        reg(Register::exception()) = vm().throw_completion<InternalError>(ErrorType::CallStackSizeExceeded).value();
        return;
    }
    // Escape hatch: LIBJS_USE_CPP_INTERPRETER=1 forces this portable C++
    // interpreter even when the assembly interpreter is available.
    static bool const use_cpp_interpreter = []() {
        auto const* env = getenv("LIBJS_USE_CPP_INTERPRETER");
        return env && env[0] == '1';
    }();
    if (!use_cpp_interpreter && AsmInterpreter::is_available()) {
        AsmInterpreter::run(*this, entry_point);
        return;
    }
    u8 const* bytecode;
    u32 program_counter;
    // Declare a lookup table for computed goto with each of the `handle_*` labels
    // to avoid the overhead of a switch statement.
    // This is a GCC extension, but it's also supported by Clang.
    static void* const bytecode_dispatch_table[] = {
#define SET_UP_LABEL(name) &&handle_##name,
        ENUMERATE_BYTECODE_OPS(SET_UP_LABEL)
    };
#undef SET_UP_LABEL

#define DISPATCH_NEXT(name)                                                                         \
    do {                                                                                            \
        if constexpr (Op::name::IsVariableLength)                                                   \
            program_counter += instruction.length();                                                \
        else                                                                                        \
            program_counter += sizeof(Op::name);                                                    \
        m_running_execution_context->program_counter = program_counter;                             \
        auto& next_instruction = *reinterpret_cast<Instruction const*>(&bytecode[program_counter]); \
        goto* bytecode_dispatch_table[static_cast<size_t>(next_instruction.type())];                \
    } while (0)

    // Reload bytecode and program_counter from the execution context after
    // operations that may have changed the current executable (handle_exception
    // unwinding inline frames, try_inline_call, pop_inline_frame).
#define RELOAD_AND_GOTO_START()                                              \
    do {                                                                     \
        bytecode = m_running_execution_context->executable->bytecode.data(); \
        program_counter = m_running_execution_context->program_counter;      \
        goto start;                                                          \
    } while (0)

    bytecode = current_executable().bytecode.data();
    program_counter = entry_point;
    for (;;) {
    start:
        m_running_execution_context->program_counter = program_counter;
        for (;;) {
            goto* bytecode_dispatch_table[static_cast<size_t>((*reinterpret_cast<Instruction const*>(&bytecode[program_counter])).type())];
        handle_Mov: {
            auto& instruction = *reinterpret_cast<Op::Mov const*>(&bytecode[program_counter]);
            set(instruction.dst(), get(instruction.src()));
            DISPATCH_NEXT(Mov);
        }
        handle_End: {
            auto& instruction = *reinterpret_cast<Op::End const*>(&bytecode[program_counter]);
            auto value = get(instruction.value());
            if (value.is_special_empty_value())
                value = js_undefined();
            // Inline frames return into their caller instead of exiting the loop.
            if (m_running_execution_context->caller_frame) {
                pop_inline_frame(value);
                RELOAD_AND_GOTO_START();
            }
            reg(Register::return_value()) = value;
            return;
        }
        handle_Jump: {
            auto& instruction = *reinterpret_cast<Op::Jump const*>(&bytecode[program_counter]);
            program_counter = instruction.target().address();
            goto start;
        }
        handle_JumpIf: {
            auto& instruction = *reinterpret_cast<Op::JumpIf const*>(&bytecode[program_counter]);
            if (get(instruction.condition()).to_boolean())
                program_counter = instruction.true_target().address();
            else
                program_counter = instruction.false_target().address();
            goto start;
        }
        handle_JumpTrue: {
            auto& instruction = *reinterpret_cast<Op::JumpTrue const*>(&bytecode[program_counter]);
            if (get(instruction.condition()).to_boolean()) {
                program_counter = instruction.target().address();
                goto start;
            }
            DISPATCH_NEXT(JumpTrue);
        }
        handle_JumpFalse: {
            auto& instruction = *reinterpret_cast<Op::JumpFalse const*>(&bytecode[program_counter]);
            if (!get(instruction.condition()).to_boolean()) {
                program_counter = instruction.target().address();
                goto start;
            }
            DISPATCH_NEXT(JumpFalse);
        }
        handle_JumpNullish: {
            auto& instruction = *reinterpret_cast<Op::JumpNullish const*>(&bytecode[program_counter]);
            if (get(instruction.condition()).is_nullish())
                program_counter = instruction.true_target().address();
            else
                program_counter = instruction.false_target().address();
            goto start;
        }

// Fused compare-and-branch handlers: a numeric fast path decided inline, with
// a fall back to the (possibly-throwing) comparison helper for everything else.
#define HANDLE_COMPARISON_OP(op_TitleCase, op_snake_case, numeric_operator)                                             \
    handle_Jump##op_TitleCase:                                                                                          \
    {                                                                                                                   \
        auto& instruction = *reinterpret_cast<Op::Jump##op_TitleCase const*>(&bytecode[program_counter]);               \
        auto lhs = get(instruction.lhs());                                                                              \
        auto rhs = get(instruction.rhs());                                                                              \
        if (lhs.is_number() && rhs.is_number()) [[likely]] {                                                            \
            bool result;                                                                                                \
            if (lhs.is_int32() && rhs.is_int32()) {                                                                     \
                result = lhs.as_i32() numeric_operator rhs.as_i32();                                                    \
            } else {                                                                                                    \
                result = lhs.as_double() numeric_operator rhs.as_double();                                              \
            }                                                                                                           \
            program_counter = result ? instruction.true_target().address() : instruction.false_target().address();      \
            goto start;                                                                                                 \
        }                                                                                                               \
        auto result = op_snake_case(vm(), get(instruction.lhs()), get(instruction.rhs()));                              \
        if (result.is_error()) [[unlikely]] {                                                                           \
            if (handle_exception(program_counter, result.error_value()) == HandleExceptionResponse::ExitFromExecutable) \
                return;                                                                                                 \
            RELOAD_AND_GOTO_START();                                                                                    \
        }                                                                                                               \
        if (result.value())                                                                                             \
            program_counter = instruction.true_target().address();                                                      \
        else                                                                                                            \
            program_counter = instruction.false_target().address();                                                     \
        goto start;                                                                                                     \
    }
            JS_ENUMERATE_COMPARISON_OPS(HANDLE_COMPARISON_OP)
#undef HANDLE_COMPARISON_OP

        handle_JumpUndefined: {
            auto& instruction = *reinterpret_cast<Op::JumpUndefined const*>(&bytecode[program_counter]);
            if (get(instruction.condition()).is_undefined())
                program_counter = instruction.true_target().address();
            else
                program_counter = instruction.false_target().address();
            goto start;
        }

// Generic handler for instructions whose execute_impl() may throw.
#define HANDLE_INSTRUCTION(name)                                                                                            \
    handle_##name:                                                                                                          \
    {                                                                                                                       \
        auto& instruction = *reinterpret_cast<Op::name const*>(&bytecode[program_counter]);                                 \
        {                                                                                                                   \
            auto result = instruction.execute_impl(*this);                                                                  \
            if (result.is_error()) [[unlikely]] {                                                                           \
                if (handle_exception(program_counter, result.error_value()) == HandleExceptionResponse::ExitFromExecutable) \
                    return;                                                                                                 \
                RELOAD_AND_GOTO_START();                                                                                    \
            }                                                                                                               \
        }                                                                                                                   \
        DISPATCH_NEXT(name);                                                                                                \
    }

// Generic handler for instructions whose execute_impl() cannot throw.
#define HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(name)                                    \
    handle_##name:                                                                          \
    {                                                                                       \
        auto& instruction = *reinterpret_cast<Op::name const*>(&bytecode[program_counter]); \
        instruction.execute_impl(*this);                                                    \
        DISPATCH_NEXT(name);                                                                \
    }

            HANDLE_INSTRUCTION(Add);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(AddPrivateName);
            HANDLE_INSTRUCTION(ArrayAppend);
            HANDLE_INSTRUCTION(BitwiseAnd);
            HANDLE_INSTRUCTION(BitwiseNot);
            HANDLE_INSTRUCTION(BitwiseOr);
            HANDLE_INSTRUCTION(ToInt32);
            HANDLE_INSTRUCTION(ToString);
            HANDLE_INSTRUCTION(ToPrimitiveWithStringHint);
            HANDLE_INSTRUCTION(BitwiseXor);
        handle_Call: {
            auto& instruction = *reinterpret_cast<Op::Call const*>(&bytecode[program_counter]);
            // Prefer the inline fast path; fall back to the generic call.
            if (try_inline_call(instruction, program_counter))
                RELOAD_AND_GOTO_START();
            auto result = instruction.execute_impl(*this);
            if (result.is_error()) [[unlikely]] {
                if (handle_exception(program_counter, result.error_value()) == HandleExceptionResponse::ExitFromExecutable)
                    return;
                RELOAD_AND_GOTO_START();
            }
            DISPATCH_NEXT(Call);
        }
            HANDLE_INSTRUCTION(CallBuiltin);
        handle_CallConstruct: {
            auto& instruction = *reinterpret_cast<Op::CallConstruct const*>(&bytecode[program_counter]);
            // Prefer the inline fast path; fall back to the generic construct.
            if (try_inline_call_construct(instruction, program_counter))
                RELOAD_AND_GOTO_START();
            auto result = instruction.execute_impl(*this);
            if (result.is_error()) [[unlikely]] {
                if (handle_exception(program_counter, result.error_value()) == HandleExceptionResponse::ExitFromExecutable)
                    return;
                RELOAD_AND_GOTO_START();
            }
            DISPATCH_NEXT(CallConstruct);
        }
            HANDLE_INSTRUCTION(CallConstructWithArgumentArray);
            HANDLE_INSTRUCTION(CallDirectEval);
            HANDLE_INSTRUCTION(CallDirectEvalWithArgumentArray);
            HANDLE_INSTRUCTION(CallWithArgumentArray);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(Catch);
            HANDLE_INSTRUCTION(ConcatString);
            HANDLE_INSTRUCTION(CopyObjectExcludingProperties);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(CreateAsyncFromSyncIterator);
            HANDLE_INSTRUCTION(CreateDataPropertyOrThrow);
            HANDLE_INSTRUCTION(CreateImmutableBinding);
            HANDLE_INSTRUCTION(CreateMutableBinding);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(CreateLexicalEnvironment);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(CreateVariableEnvironment);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(CreatePrivateEnvironment);
            HANDLE_INSTRUCTION(CreateVariable);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(CreateRestParams);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(CreateArguments);
            HANDLE_INSTRUCTION(Decrement);
            HANDLE_INSTRUCTION(DeleteById);
            HANDLE_INSTRUCTION(DeleteByValue);
            HANDLE_INSTRUCTION(DeleteVariable);
            HANDLE_INSTRUCTION(Div);
            HANDLE_INSTRUCTION(EnterObjectEnvironment);
            HANDLE_INSTRUCTION(Exp);
            HANDLE_INSTRUCTION(GetById);
            HANDLE_INSTRUCTION(GetByIdWithThis);
            HANDLE_INSTRUCTION(GetByValue);
            HANDLE_INSTRUCTION(GetByValueWithThis);
            HANDLE_INSTRUCTION(GetCalleeAndThisFromEnvironment);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(GetCompletionFields);
            HANDLE_INSTRUCTION(GetGlobal);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(GetImportMeta);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(GetLexicalEnvironment);
            HANDLE_INSTRUCTION(GetIterator);
            HANDLE_INSTRUCTION(GetLength);
            HANDLE_INSTRUCTION(GetLengthWithThis);
            HANDLE_INSTRUCTION(GetMethod);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(GetNewTarget);
            HANDLE_INSTRUCTION(GetObjectPropertyIterator);
            HANDLE_INSTRUCTION(GetPrivateById);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(GetTemplateObject);
            HANDLE_INSTRUCTION(GetBinding);
            HANDLE_INSTRUCTION(GetInitializedBinding);
            HANDLE_INSTRUCTION(GreaterThan);
            HANDLE_INSTRUCTION(GreaterThanEquals);
            HANDLE_INSTRUCTION(HasPrivateId);
            HANDLE_INSTRUCTION(ImportCall);
            HANDLE_INSTRUCTION(In);
            HANDLE_INSTRUCTION(Increment);
            HANDLE_INSTRUCTION(InitializeLexicalBinding);
            HANDLE_INSTRUCTION(InitializeVariableBinding);
            HANDLE_INSTRUCTION(InstanceOf);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(IsCallable);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(IsConstructor);
            HANDLE_INSTRUCTION(IteratorClose);
            HANDLE_INSTRUCTION(IteratorNext);
            HANDLE_INSTRUCTION(IteratorNextUnpack);
            HANDLE_INSTRUCTION(IteratorToArray);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(LeavePrivateEnvironment);
            HANDLE_INSTRUCTION(LeftShift);
            HANDLE_INSTRUCTION(LessThan);
            HANDLE_INSTRUCTION(LessThanEquals);
            HANDLE_INSTRUCTION(LooselyEquals);
            HANDLE_INSTRUCTION(LooselyInequals);
            HANDLE_INSTRUCTION(Mod);
            HANDLE_INSTRUCTION(Mul);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(NewArray);
            HANDLE_INSTRUCTION(NewArrayWithLength);
            HANDLE_INSTRUCTION(NewClass);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(NewFunction);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(NewObject);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(CacheObjectShape);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(InitObjectLiteralProperty);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(NewObjectWithNoPrototype);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(NewPrimitiveArray);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(NewRegExp);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(NewReferenceError);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(NewTypeError);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(Not);
            HANDLE_INSTRUCTION(PostfixDecrement);
            HANDLE_INSTRUCTION(PostfixIncrement);
            HANDLE_INSTRUCTION(PutById);
            HANDLE_INSTRUCTION(PutByIdWithThis);
            HANDLE_INSTRUCTION(PutByValue);
            HANDLE_INSTRUCTION(PutByValueWithThis);
            HANDLE_INSTRUCTION(PutBySpread);
            HANDLE_INSTRUCTION(PutPrivateById);
            HANDLE_INSTRUCTION(ResolveSuperBase);
            HANDLE_INSTRUCTION(ResolveThisBinding);
            HANDLE_INSTRUCTION(RightShift);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(SetCompletionType);
            HANDLE_INSTRUCTION(SetGlobal);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(SetLexicalEnvironment);
            HANDLE_INSTRUCTION(SetLexicalBinding);
            HANDLE_INSTRUCTION(SetVariableBinding);
            HANDLE_INSTRUCTION(StrictlyEquals);
            HANDLE_INSTRUCTION(StrictlyInequals);
            HANDLE_INSTRUCTION(Sub);
            HANDLE_INSTRUCTION(SuperCallWithArgumentArray);
            HANDLE_INSTRUCTION(ThrowIfNotObject);
            HANDLE_INSTRUCTION(ThrowIfNullish);
            HANDLE_INSTRUCTION(ThrowIfTDZ);
            HANDLE_INSTRUCTION(ThrowConstAssignment);
            HANDLE_INSTRUCTION(ToLength);
            HANDLE_INSTRUCTION(ToObject);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(ToBoolean);
            HANDLE_INSTRUCTION_WITHOUT_EXCEPTION_CHECK(Typeof);
            HANDLE_INSTRUCTION(TypeofBinding);
            HANDLE_INSTRUCTION(UnaryMinus);
            HANDLE_INSTRUCTION(UnaryPlus);
            HANDLE_INSTRUCTION(UnsignedRightShift);
        handle_Throw: {
            auto& instruction = *reinterpret_cast<Op::Throw const*>(&bytecode[program_counter]);
            // Throw always produces an error completion; go straight to unwinding.
            auto result = instruction.execute_impl(*this);
            if (handle_exception(program_counter, result.error_value()) == HandleExceptionResponse::ExitFromExecutable)
                return;
            RELOAD_AND_GOTO_START();
        }
        handle_Await: {
            auto& instruction = *reinterpret_cast<Op::Await const*>(&bytecode[program_counter]);
            // Suspends the current executable; execution resumes later at the
            // continuation recorded by the instruction.
            instruction.execute_impl(*this);
            return;
        }
        handle_Return: {
            auto& instruction = *reinterpret_cast<Op::Return const*>(&bytecode[program_counter]);
            auto return_value = get(instruction.value());
            if (return_value.is_special_empty_value())
                return_value = js_undefined();
            // Inline frames return into their caller instead of exiting the loop.
            if (m_running_execution_context->caller_frame) {
                pop_inline_frame(return_value);
                RELOAD_AND_GOTO_START();
            }
            reg(Register::return_value()) = return_value;
            // Unlike End, Return also clears any pending exception.
            reg(Register::exception()) = js_special_empty_value();
            return;
        }
        handle_Yield: {
            auto& instruction = *reinterpret_cast<Op::Yield const*>(&bytecode[program_counter]);
            // Suspends the generator; resumption re-enters run_bytecode later.
            instruction.execute_impl(*this);
            return;
        }
        }
    }
}
// Looks up an identifier in the currently running executable's identifier table.
Utf16FlyString const& Interpreter::get_identifier(IdentifierTableIndex index) const
{
    auto const& executable = *m_running_execution_context->executable;
    return executable.get_identifier(index);
}
// Looks up a property key in the currently running executable's key table.
PropertyKey const& Interpreter::get_property_key(PropertyKeyTableIndex index) const
{
    auto const& executable = *m_running_execution_context->executable;
    return executable.get_property_key(index);
}
// Convenience accessor: the global declarative environment of the current realm.
DeclarativeEnvironment& Interpreter::global_declarative_environment()
{
    auto& current_realm = realm();
    return current_realm.global_declarative_environment();
}
// Runs `executable` within `context`, starting at `entry_point` (or offset 0).
// Returns the value left in the return-value register, or a throw completion
// if the exception register is non-empty afterwards.
// NOTE(review): the call site in Interpreter::run(Script&) passes four
// arguments to run_executable while this definition takes three — presumably
// an overload or an extra defaulted parameter exists in the header; confirm
// against Interpreter.h.
ThrowCompletionOr<Value> Interpreter::run_executable(ExecutionContext& context, Executable& executable, Optional<size_t> entry_point)
{
    dbgln_if(JS_BYTECODE_DEBUG, "Bytecode::Interpreter will run unit {}", &executable);
    // NOTE: This is how we "push" a new execution context onto the interpreter stack.
    TemporaryChange restore_running_execution_context { m_running_execution_context, &context };
    context.executable = executable;
    VERIFY(executable.registers_and_locals_count + executable.constants.size() == executable.registers_and_locals_and_constants_count);
    VERIFY(executable.registers_and_locals_and_constants_count <= context.registers_and_constants_and_locals_and_arguments_span().size());
    // NOTE: We only copy the `this` value from ExecutionContext if it's not already set.
    // If we are re-entering an async/generator context, the `this` value
    // may have already been cached by a ResolveThisBinding instruction,
    // and subsequent instructions expect this value to be set.
    if (reg(Register::this_value()).is_special_empty_value())
        reg(Register::this_value()) = context.this_value.value_or(js_special_empty_value());
    // NB: Layout is [registers | locals | constants | arguments], so constants start after registers+locals.
    auto* values = context.registers_and_constants_and_locals_and_arguments();
    if (auto count = executable.constants.size())
        memcpy(values + executable.registers_and_locals_count,
            executable.constants.data(),
            count * sizeof(Value));
    run_bytecode(entry_point.value_or(0));
    dbgln_if(JS_BYTECODE_DEBUG, "Bytecode::Interpreter did run unit {}", context.executable);
    if constexpr (JS_BYTECODE_DEBUG) {
        // Debug aid: dump the contents of every register after the run.
        for (size_t i = 0; i < executable.number_of_registers; ++i) {
            String value_string;
            if (values[i].is_special_empty_value())
                value_string = "(empty)"_string;
            else
                value_string = values[i].to_string_without_side_effects();
            dbgln("[{:3}] {}", i, value_string);
        }
    }
    vm().run_queued_promise_jobs();
    vm().finish_execution_generation();
    // A non-empty exception register means the run ended abruptly.
    auto exception = reg(Register::exception());
    if (!exception.is_special_empty_value()) [[unlikely]]
        return throw_completion(exception);
    return reg(Register::return_value());
}
// Moves the pending exception into `dst` and clears the exception register.
void Interpreter::catch_exception(Operand dst)
{
    auto pending_exception = reg(Register::exception());
    set(dst, pending_exception);
    reg(Register::exception()) = js_special_empty_value();
}
// Generates a named bytecode executable from an AST node, optionally dumping
// the disassembly when g_dump_bytecode is set.
GC::Ref<Bytecode::Executable> compile(VM& vm, ASTNode const& node, FunctionKind kind, Utf16FlyString const& name)
{
    auto executable = Bytecode::Generator::generate_from_ast_node(vm, node, kind);
    executable->name = name;
    if (Bytecode::g_dump_bytecode)
        executable->dump();
    return executable;
}
// Compiles a function's shared instance data into a bytecode executable,
// carrying over the function's name. Dumps the result when bytecode dumping
// is enabled.
GC::Ref<Bytecode::Executable> compile(VM& vm, GC::Ref<SharedFunctionInstanceData const> shared_function_instance_data, BuiltinAbstractOperationsEnabled builtin_abstract_operations_enabled)
{
    auto executable = Bytecode::Generator::generate_from_function(vm, shared_function_instance_data, builtin_abstract_operations_enabled);
    executable->name = shared_function_instance_data->m_name;
    if (Bytecode::g_dump_bytecode)
        executable->dump();
    return executable;
}
// NOTE: This function assumes that the index is valid within the TypedArray,
//       and that the TypedArray is not detached.
// Reads element `index` directly out of the viewed ArrayBuffer, bypassing the
// generic TypedArray element machinery.
template<typename T>
inline Value fast_typed_array_get_element(TypedArrayBase& typed_array, u32 index)
{
    // Guard the byte-offset computation against u32 overflow.
    Checked<u32> byte_offset = index;
    byte_offset *= sizeof(T);
    byte_offset += typed_array.byte_offset();
    if (byte_offset.has_overflow()) [[unlikely]] {
        return js_undefined();
    }
    auto const& buffer = typed_array.viewed_array_buffer()->buffer();
    auto const* element = reinterpret_cast<T const*>(buffer.offset_pointer(byte_offset.value()));
    return Value { *element };
}
// NOTE: This function assumes that the index is valid within the TypedArray,
//       and that the TypedArray is not detached.
// Writes `value` into element `index` directly in the viewed ArrayBuffer,
// bypassing the generic TypedArray element machinery.
template<typename T>
inline void fast_typed_array_set_element(TypedArrayBase& typed_array, u32 index, T value)
{
    // Guard the byte-offset computation against u32 overflow.
    Checked<u32> byte_offset = index;
    byte_offset *= sizeof(T);
    byte_offset += typed_array.byte_offset();
    if (byte_offset.has_overflow()) [[unlikely]] {
        return;
    }
    auto& buffer = typed_array.viewed_array_buffer()->buffer();
    auto* element = reinterpret_cast<T*>(buffer.offset_pointer(byte_offset.value()));
    *element = value;
}
// Throws the TypeError for a property read on null/undefined, naming the base
// expression as well when its identifier is known.
static COLD Completion throw_null_or_undefined_property_get(VM& vm, Value base_value, Optional<IdentifierTableIndex> base_identifier, IdentifierTableIndex property_identifier, Executable const& executable)
{
    VERIFY(base_value.is_nullish());
    auto const& property_name = executable.get_identifier(property_identifier);
    if (!base_identifier.has_value())
        return vm.throw_completion<TypeError>(ErrorType::ToObjectNullOrUndefinedWithProperty, property_name, base_value);
    return vm.throw_completion<TypeError>(ErrorType::ToObjectNullOrUndefinedWithPropertyAndName, property_name, base_value, executable.get_identifier(*base_identifier));
}
// Same as above, but the property is an arbitrary Value (computed access).
static COLD Completion throw_null_or_undefined_property_get(VM& vm, Value base_value, Optional<IdentifierTableIndex> base_identifier, Value property, Executable const& executable)
{
    VERIFY(base_value.is_nullish());
    if (!base_identifier.has_value())
        return vm.throw_completion<TypeError>(ErrorType::ToObjectNullOrUndefinedWithProperty, property, base_value);
    return vm.throw_completion<TypeError>(ErrorType::ToObjectNullOrUndefinedWithPropertyAndName, property, base_value, executable.get_identifier(*base_identifier));
}
// Resolves the object a property read is performed on; throws a TypeError
// when the base is null or undefined.
ALWAYS_INLINE ThrowCompletionOr<GC::Ref<Object>> base_object_for_get(VM& vm, Value base_value, Optional<IdentifierTableIndex> base_identifier, IdentifierTableIndex property_identifier, Executable const& executable)
{
    auto base_object = base_object_for_get_impl(vm, base_value);
    if (!base_object) [[unlikely]] {
        // NOTE: At this point this is guaranteed to throw (null or undefined).
        return throw_null_or_undefined_property_get(vm, base_value, base_identifier, property_identifier, executable);
    }
    return GC::Ref { *base_object };
}
// Same as above, but the property is an arbitrary Value (computed access).
ALWAYS_INLINE ThrowCompletionOr<GC::Ref<Object>> base_object_for_get(VM& vm, Value base_value, Optional<IdentifierTableIndex> base_identifier, Value property, Executable const& executable)
{
    auto base_object = base_object_for_get_impl(vm, base_value);
    if (!base_object) [[unlikely]] {
        // NOTE: At this point this is guaranteed to throw (null or undefined).
        return throw_null_or_undefined_property_get(vm, base_value, base_identifier, property, executable);
    }
    return GC::Ref { *base_object };
}
// Implements computed property access `base[property]`, with fast paths for
// non-negative Int32 indices into ordinary indexed storage and TypedArrays.
// Falls back to ToPropertyKey + [[Get]] for everything else.
inline ThrowCompletionOr<Value> get_by_value(VM& vm, Optional<IdentifierTableIndex> base_identifier, Value base_value, Value property_key_value, Executable const& executable)
{
    // OPTIMIZATION: Fast path for simple Int32 indexes in array-like objects.
    if (base_value.is_object() && property_key_value.is_non_negative_int32()) {
        auto& object = base_value.as_object();
        auto index = static_cast<u32>(property_key_value.as_i32());
        auto const* object_storage = object.indexed_properties().storage();
        // For "non-typed arrays":
        // Only safe when the object cannot intercept indexed access
        // (no Proxy-like behavior, no interesting prototypes in the way).
        if (!object.may_interfere_with_indexed_property_access()
            && object_storage) {
            auto maybe_value = [&] {
                if (object_storage->is_simple_storage())
                    return static_cast<SimpleIndexedPropertyStorage const*>(object_storage)->inline_get(index);
                else
                    return static_cast<GenericIndexedPropertyStorage const*>(object_storage)->get(index);
            }();
            if (maybe_value.has_value()) {
                auto value = maybe_value->value;
                // Accessor properties must run their getter; fall through to the generic path.
                if (!value.is_accessor())
                    return value;
            }
        }
        // For typed arrays:
        if (object.is_typed_array()) {
            auto& typed_array = static_cast<TypedArrayBase&>(object);
            auto canonical_index = CanonicalIndex { CanonicalIndex::Type::Index, index };
            // Both branches below require a valid in-bounds index on a
            // non-detached buffer (precondition of fast_typed_array_get_element).
            if (is_valid_integer_index(typed_array, canonical_index)) {
                switch (typed_array.kind()) {
                case TypedArrayBase::Kind::Uint8Array:
                    return fast_typed_array_get_element<u8>(typed_array, index);
                case TypedArrayBase::Kind::Uint16Array:
                    return fast_typed_array_get_element<u16>(typed_array, index);
                case TypedArrayBase::Kind::Uint32Array:
                    return fast_typed_array_get_element<u32>(typed_array, index);
                case TypedArrayBase::Kind::Int8Array:
                    return fast_typed_array_get_element<i8>(typed_array, index);
                case TypedArrayBase::Kind::Int16Array:
                    return fast_typed_array_get_element<i16>(typed_array, index);
                case TypedArrayBase::Kind::Int32Array:
                    return fast_typed_array_get_element<i32>(typed_array, index);
                case TypedArrayBase::Kind::Uint8ClampedArray:
                    // Clamping only affects writes; reads are plain u8 loads.
                    return fast_typed_array_get_element<u8>(typed_array, index);
                case TypedArrayBase::Kind::Float16Array:
                    return fast_typed_array_get_element<f16>(typed_array, index);
                case TypedArrayBase::Kind::Float32Array:
                    return fast_typed_array_get_element<float>(typed_array, index);
                case TypedArrayBase::Kind::Float64Array:
                    return fast_typed_array_get_element<double>(typed_array, index);
                default:
                    // FIXME: Support more TypedArray kinds.
                    break;
                }
            }
            // Generic TypedArray element read for kinds not handled above
            // (and for out-of-range indices, which yield undefined per spec).
            switch (typed_array.kind()) {
#define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
    case TypedArrayBase::Kind::ClassName:                                           \
        return typed_array_get_element<Type>(typed_array, canonical_index);
                JS_ENUMERATE_TYPED_ARRAYS
#undef __JS_ENUMERATE
            }
        }
    }
    // Slow path: coerce the base to an object (throwing for null/undefined)
    // and the property to a property key.
    auto object = TRY(base_object_for_get(vm, base_value, base_identifier, property_key_value, executable));
    auto property_key = TRY(property_key_value.to_property_key(vm));
    // Strings answer index/length lookups directly without boxing.
    if (base_value.is_string()) {
        auto string_value = TRY(base_value.as_string().get(vm, property_key));
        if (string_value.has_value())
            return *string_value;
    }
    // NB: base_value (not the coerced object) is the receiver for getters.
    return TRY(object->internal_get(property_key, base_value));
}
// Reads a global (or module-scope) binding, using `cache` to avoid repeated
// lookups:
// - entries[0] caches a property offset on the global binding object (vars),
//   validated against the object's shape (and dictionary generation).
// - environment_binding_index caches a slot in the global declarative record
//   or in the module environment (lexical/module bindings).
// The whole cache is keyed on the declarative record's serial number, which
// changes whenever its set of bindings changes.
inline ThrowCompletionOr<Value> get_global(Interpreter& interpreter, IdentifierTableIndex identifier_index, Strict strict, GlobalVariableCache& cache)
{
    auto& vm = interpreter.vm();
    auto& binding_object = interpreter.global_object();
    auto& declarative_record = interpreter.global_declarative_environment();
    auto& shape = binding_object.shape();
    if (cache.environment_serial_number == declarative_record.environment_serial_number()) {
        // OPTIMIZATION: For global var bindings, if the shape of the global object hasn't changed,
        //               we can use the cached property offset.
        if (&shape == cache.entries[0].shape && (!shape.is_dictionary() || shape.dictionary_generation() == cache.entries[0].shape_dictionary_generation)) {
            auto value = binding_object.get_direct(cache.entries[0].property_offset);
            // Accessor properties still need their getter invoked.
            if (value.is_accessor())
                return TRY(call(vm, value.as_accessor().getter(), &binding_object));
            return value;
        }
        // OPTIMIZATION: For global lexical bindings, if the global declarative environment hasn't changed,
        //               we can use the cached environment binding index.
        if (cache.has_environment_binding_index) {
            if (cache.in_module_environment) {
                auto module = vm.running_execution_context().script_or_module.get_pointer<GC::Ref<Module>>();
                return (*module)->environment()->get_binding_value_direct(vm, cache.environment_binding_index);
            }
            return declarative_record.get_binding_value_direct(vm, cache.environment_binding_index);
        }
    }
    // Cache miss (or stale): repopulate the cache while doing the full lookup.
    cache.environment_serial_number = declarative_record.environment_serial_number();
    auto& identifier = interpreter.get_identifier(identifier_index);
    if (auto* module = vm.running_execution_context().script_or_module.get_pointer<GC::Ref<Module>>()) {
        // NOTE: GetGlobal is used to access variables stored in the module environment and global environment.
        //       The module environment is checked first since it precedes the global environment in the environment chain.
        auto& module_environment = *(*module)->environment();
        Optional<size_t> index;
        if (TRY(module_environment.has_binding(identifier, &index))) {
            if (index.has_value()) {
                cache.environment_binding_index = static_cast<u32>(index.value());
                cache.has_environment_binding_index = true;
                cache.in_module_environment = true;
                return TRY(module_environment.get_binding_value_direct(vm, index.value()));
            }
            // Binding exists but has no direct slot; cannot be cached.
            return TRY(module_environment.get_binding_value(vm, identifier, true));
        }
    }
    Optional<size_t> offset;
    if (TRY(declarative_record.has_binding(identifier, &offset))) {
        cache.environment_binding_index = static_cast<u32>(offset.value());
        cache.has_environment_binding_index = true;
        cache.in_module_environment = false;
        return TRY(declarative_record.get_binding_value(vm, identifier, strict == Strict::Yes));
    }
    if (TRY(binding_object.has_property(identifier))) [[likely]] {
        // Only plain own data properties report cacheable metadata; accessors
        // and exotic objects keep the cache entry invalid.
        CacheableGetPropertyMetadata cacheable_metadata;
        auto value = TRY(binding_object.internal_get(identifier, &binding_object, &cacheable_metadata));
        if (cacheable_metadata.type == CacheableGetPropertyMetadata::Type::GetOwnProperty) {
            cache.entries[0].shape = shape;
            cache.entries[0].property_offset = cacheable_metadata.property_offset.value();
            if (shape.is_dictionary()) {
                cache.entries[0].shape_dictionary_generation = shape.dictionary_generation();
            }
        }
        return value;
    }
    return vm.throw_completion<ReferenceError>(ErrorType::UnknownIdentifier, identifier);
}
// Throws the "X is not a function/constructor" TypeError, including the
// source expression text when codegen recorded it.
static COLD Completion throw_type_error_for_callee(Bytecode::Interpreter& interpreter, Value callee, StringView callee_type, Optional<StringTableIndex> const expression_string)
{
    auto& vm = interpreter.vm();
    if (!expression_string.has_value())
        return vm.throw_completion<TypeError>(ErrorType::IsNotA, callee, callee_type);
    auto const& expression_text = interpreter.current_executable().get_string(*expression_string);
    return vm.throw_completion<TypeError>(ErrorType::IsNotAEvaluatedFrom, callee, callee_type, expression_text);
}
// Validates the callee before a call: plain calls and direct eval require a
// function, `new` requires a constructor. Throws a TypeError otherwise.
inline ThrowCompletionOr<void> throw_if_needed_for_call(Interpreter& interpreter, Value callee, Op::CallType call_type, Optional<StringTableIndex> const expression_string)
{
    bool const is_plain_call = call_type == Op::CallType::Call || call_type == Op::CallType::DirectEval;
    if (is_plain_call && !callee.is_function()) [[unlikely]]
        return throw_type_error_for_callee(interpreter, callee, "function"sv, expression_string);
    if (call_type == Op::CallType::Construct && !callee.is_constructor()) [[unlikely]]
        return throw_type_error_for_callee(interpreter, callee, "constructor"sv, expression_string);
    return {};
}
// Instantiates an ECMAScriptFunctionObject from the executable's shared
// function data, capturing the current lexical and private environments.
// When `home_object` is provided, it becomes the function's [[HomeObject]]
// (used for `super` references).
inline Value new_function(Interpreter& interpreter, u32 shared_function_data_index, Optional<Operand> const home_object)
{
    auto& vm = interpreter.vm();
    auto& realm = *vm.current_realm();
    auto& shared_data = *interpreter.current_executable().shared_function_data[shared_function_data_index];
    // Pick the prototype that matches the function's kind.
    auto prototype = [&]() -> GC::Ref<Object> {
        switch (shared_data.m_kind) {
        case FunctionKind::Normal:
            return realm.intrinsics().function_prototype();
        case FunctionKind::Generator:
            return realm.intrinsics().generator_function_prototype();
        case FunctionKind::Async:
            return realm.intrinsics().async_function_prototype();
        case FunctionKind::AsyncGenerator:
            return realm.intrinsics().async_generator_function_prototype();
        }
        VERIFY_NOT_REACHED();
    }();
    auto function = ECMAScriptFunctionObject::create_from_function_data(
        realm, shared_data,
        vm.lexical_environment(),
        vm.running_execution_context().private_environment,
        *prototype);
    if (home_object.has_value())
        function->set_home_object(&interpreter.get(*home_object).as_object());
    return function;
}
// Implements computed property assignment `base[property] = value`, with fast
// paths for non-negative Int32 indices into ordinary indexed storage and
// TypedArrays. Falls back to ToPropertyKey + put_by_property_key.
inline ThrowCompletionOr<void> put_by_value(VM& vm, Value base, Optional<Utf16FlyString const&> const base_identifier, Value property_key_value, Value value, PutKind kind, Strict strict)
{
    // OPTIMIZATION: Fast path for simple Int32 indexes in array-like objects.
    if (kind == PutKind::Normal
        && base.is_object() && property_key_value.is_non_negative_int32()) {
        auto& object = base.as_object();
        auto* storage = object.indexed_properties().storage();
        auto index = static_cast<u32>(property_key_value.as_i32());
        // For "non-typed arrays":
        // Only safe for simple storage on objects that cannot intercept
        // indexed access, and only when overwriting an existing data property.
        if (storage
            && storage->is_simple_storage()
            && !object.may_interfere_with_indexed_property_access()) {
            auto maybe_value = storage->get(index);
            if (maybe_value.has_value()) {
                auto existing_value = maybe_value->value;
                // Accessor properties must invoke their setter; fall through.
                if (!existing_value.is_accessor()) {
                    storage->put(index, value);
                    return {};
                }
            }
        }
        // For typed arrays:
        if (object.is_typed_array()) {
            auto& typed_array = static_cast<TypedArrayBase&>(object);
            auto canonical_index = CanonicalIndex { CanonicalIndex::Type::Index, index };
            if (is_valid_integer_index(typed_array, canonical_index)) {
                if (value.is_int32()) {
                    // Int32 values convert to the element type with a plain cast.
                    switch (typed_array.kind()) {
                    case TypedArrayBase::Kind::Uint8Array:
                        fast_typed_array_set_element<u8>(typed_array, index, static_cast<u8>(value.as_i32()));
                        return {};
                    case TypedArrayBase::Kind::Uint16Array:
                        fast_typed_array_set_element<u16>(typed_array, index, static_cast<u16>(value.as_i32()));
                        return {};
                    case TypedArrayBase::Kind::Uint32Array:
                        fast_typed_array_set_element<u32>(typed_array, index, static_cast<u32>(value.as_i32()));
                        return {};
                    case TypedArrayBase::Kind::Int8Array:
                        fast_typed_array_set_element<i8>(typed_array, index, static_cast<i8>(value.as_i32()));
                        return {};
                    case TypedArrayBase::Kind::Int16Array:
                        fast_typed_array_set_element<i16>(typed_array, index, static_cast<i16>(value.as_i32()));
                        return {};
                    case TypedArrayBase::Kind::Int32Array:
                        fast_typed_array_set_element<i32>(typed_array, index, value.as_i32());
                        return {};
                    case TypedArrayBase::Kind::Uint8ClampedArray:
                        // Clamped arrays saturate to [0, 255] instead of wrapping.
                        fast_typed_array_set_element<u8>(typed_array, index, clamp(value.as_i32(), 0, 255));
                        return {};
                    default:
                        break;
                    }
                } else if (value.is_double()) {
                    // Doubles go straight into float element types; integer
                    // element types use the spec ToIntN/ToUintN conversions
                    // (MUST: these cannot throw for a plain double).
                    switch (typed_array.kind()) {
                    case TypedArrayBase::Kind::Float16Array:
                        fast_typed_array_set_element<f16>(typed_array, index, static_cast<f16>(value.as_double()));
                        return {};
                    case TypedArrayBase::Kind::Float32Array:
                        fast_typed_array_set_element<float>(typed_array, index, static_cast<float>(value.as_double()));
                        return {};
                    case TypedArrayBase::Kind::Float64Array:
                        fast_typed_array_set_element<double>(typed_array, index, value.as_double());
                        return {};
                    case TypedArrayBase::Kind::Int8Array:
                        fast_typed_array_set_element<i8>(typed_array, index, MUST(value.to_i8(vm)));
                        return {};
                    case TypedArrayBase::Kind::Int16Array:
                        fast_typed_array_set_element<i16>(typed_array, index, MUST(value.to_i16(vm)));
                        return {};
                    case TypedArrayBase::Kind::Int32Array:
                        fast_typed_array_set_element<i32>(typed_array, index, MUST(value.to_i32(vm)));
                        return {};
                    case TypedArrayBase::Kind::Uint8Array:
                        fast_typed_array_set_element<u8>(typed_array, index, MUST(value.to_u8(vm)));
                        return {};
                    case TypedArrayBase::Kind::Uint16Array:
                        fast_typed_array_set_element<u16>(typed_array, index, MUST(value.to_u16(vm)));
                        return {};
                    case TypedArrayBase::Kind::Uint32Array:
                        fast_typed_array_set_element<u32>(typed_array, index, MUST(value.to_u32(vm)));
                        return {};
                    default:
                        break;
                    }
                }
                // FIXME: Support more TypedArray kinds.
            }
            // Integral doubles that fit in u32 can still take the fast path
            // for Uint32Array (they are not is_int32() when > INT32_MAX).
            if (typed_array.kind() == TypedArrayBase::Kind::Uint32Array && value.is_integral_number()) {
                auto integer = value.as_double();
                if (AK::is_within_range<u32>(integer) && is_valid_integer_index(typed_array, canonical_index)) {
                    fast_typed_array_set_element<u32>(typed_array, index, static_cast<u32>(integer));
                    return {};
                }
            }
            // Generic TypedArray element write for everything else (may throw,
            // e.g. when converting objects/BigInts).
            switch (typed_array.kind()) {
#define __JS_ENUMERATE(ClassName, snake_name, PrototypeName, ConstructorName, Type) \
    case TypedArrayBase::Kind::ClassName:                                           \
        return typed_array_set_element<Type>(typed_array, canonical_index, value);
                JS_ENUMERATE_TYPED_ARRAYS
#undef __JS_ENUMERATE
            }
            // Unreachable in practice: the switch above covers all kinds.
            return {};
        }
    }
    // Slow path: coerce the property to a key and perform a full [[Set]].
    auto property_key = TRY(property_key_value.to_property_key(vm));
    TRY(put_by_property_key(vm, base, base, value, base_identifier, property_key, kind, strict));
    return {};
}
// Result of resolving a callee through the scope chain: the function value
// plus the `this` value implied by the reference (e.g. a `with` statement's
// base object), or undefined when there is none.
struct CalleeAndThis {
    Value callee;
    Value this_value;
};
inline ThrowCompletionOr<CalleeAndThis> get_callee_and_this_from_environment(Interpreter& interpreter, Utf16FlyString const& name, Strict strict, EnvironmentCoordinate& cache)
{
auto& vm = interpreter.vm();
Value callee = js_undefined();
if (cache.is_valid()) [[likely]] {
auto const* environment = interpreter.running_execution_context().lexical_environment.ptr();
LibJS: Limit eval() deoptimization to the containing function scope Previously, when direct eval() was called, we would mark the entire environment chain as "permanently screwed by eval", disabling variable access caching all the way up to the global scope. This was overly conservative. According to the ECMAScript specification, a sloppy direct eval() can only inject var declarations into its containing function's variable environment - it cannot inject variables into parent function scopes. This patch makes two changes: 1. Stop propagating the "screwed by eval" flag at function boundaries. When set_permanently_screwed_by_eval() hits a FunctionEnvironment or GlobalEnvironment, it no longer continues to outer environments. 2. Check each environment during cache lookup traversal. If any environment in the path is marked as screwed, we bail to the slow path. This catches the case where we're inside a function with eval and have a cached coordinate pointing to an outer scope. The second change is necessary because eval can create local variables that shadow outer bindings. When looking up a variable from inside a function that called eval, we can't trust cached coordinates that point to outer scopes, since eval may have created a closer binding. This improves performance for code with nested functions where an inner function uses eval but parent functions perform many variable accesses. The parent functions can now use cached environment coordinates. All 29 new tests verify behavior matches V8.
2026-01-26 20:40:05 +01:00
for (size_t i = 0; i < cache.hops; ++i) {
if (environment->is_permanently_screwed_by_eval()) [[unlikely]]
goto slow_path;
environment = environment->outer_environment();
LibJS: Limit eval() deoptimization to the containing function scope Previously, when direct eval() was called, we would mark the entire environment chain as "permanently screwed by eval", disabling variable access caching all the way up to the global scope. This was overly conservative. According to the ECMAScript specification, a sloppy direct eval() can only inject var declarations into its containing function's variable environment - it cannot inject variables into parent function scopes. This patch makes two changes: 1. Stop propagating the "screwed by eval" flag at function boundaries. When set_permanently_screwed_by_eval() hits a FunctionEnvironment or GlobalEnvironment, it no longer continues to outer environments. 2. Check each environment during cache lookup traversal. If any environment in the path is marked as screwed, we bail to the slow path. This catches the case where we're inside a function with eval and have a cached coordinate pointing to an outer scope. The second change is necessary because eval can create local variables that shadow outer bindings. When looking up a variable from inside a function that called eval, we can't trust cached coordinates that point to outer scopes, since eval may have created a closer binding. This improves performance for code with nested functions where an inner function uses eval but parent functions perform many variable accesses. The parent functions can now use cached environment coordinates. All 29 new tests verify behavior matches V8.
2026-01-26 20:40:05 +01:00
}
if (!environment->is_permanently_screwed_by_eval()) [[likely]] {
callee = TRY(static_cast<DeclarativeEnvironment const&>(*environment).get_binding_value_direct(vm, cache.index));
auto this_value = js_undefined();
if (auto base_object = environment->with_base_object()) [[unlikely]]
this_value = base_object;
return CalleeAndThis {
.callee = callee,
.this_value = this_value,
};
}
LibJS: Limit eval() deoptimization to the containing function scope Previously, when direct eval() was called, we would mark the entire environment chain as "permanently screwed by eval", disabling variable access caching all the way up to the global scope. This was overly conservative. According to the ECMAScript specification, a sloppy direct eval() can only inject var declarations into its containing function's variable environment - it cannot inject variables into parent function scopes. This patch makes two changes: 1. Stop propagating the "screwed by eval" flag at function boundaries. When set_permanently_screwed_by_eval() hits a FunctionEnvironment or GlobalEnvironment, it no longer continues to outer environments. 2. Check each environment during cache lookup traversal. If any environment in the path is marked as screwed, we bail to the slow path. This catches the case where we're inside a function with eval and have a cached coordinate pointing to an outer scope. The second change is necessary because eval can create local variables that shadow outer bindings. When looking up a variable from inside a function that called eval, we can't trust cached coordinates that point to outer scopes, since eval may have created a closer binding. This improves performance for code with nested functions where an inner function uses eval but parent functions perform many variable accesses. The parent functions can now use cached environment coordinates. All 29 new tests verify behavior matches V8.
2026-01-26 20:40:05 +01:00
slow_path:
cache = {};
}
auto reference = TRY(vm.resolve_binding(name, strict));
if (reference.environment_coordinate().has_value())
cache = reference.environment_coordinate().value();
callee = TRY(reference.get_value(vm));
Value this_value;
if (reference.is_property_reference()) {
this_value = reference.get_this_value();
} else {
if (reference.is_environment_reference()) {
if (auto base_object = reference.base_environment().with_base_object()) [[unlikely]]
this_value = base_object;
}
}
return CalleeAndThis {
.callee = callee,
.this_value = this_value,
};
}
// 13.2.7.3 Runtime Semantics: Evaluation, https://tc39.es/ecma262/#sec-regular-expression-literals-runtime-semantics-evaluation
inline Value new_regexp(VM& vm, Regex<ECMA262> const& regex, Utf16String pattern, Utf16String flags)
{
    auto& realm = *vm.current_realm();

    // Equivalent to `! RegExpCreate(pattern, flags)` (steps 1-3 of the spec
    // algorithm), but reusing the already-parsed Regex instead of re-parsing
    // the literal's source text.
    auto regexp_object = RegExpObject::create(realm, regex, move(pattern), move(flags));

    // From RegExpAlloc ('Legacy RegExp features' proposal): record the realm
    // and enable legacy features. The `SameValue(newTarget, %RegExp%)` check
    // is unnecessary here, because RegExpCreate always calls RegExpAlloc with
    // %RegExp% as newTarget.
    regexp_object->set_realm(realm);
    regexp_object->set_legacy_features_enabled(true);

    return regexp_object;
}
// Creates a binding named `name` in the lexical or variable environment.
// Global var bindings go through create_global_var_binding() instead.
inline ThrowCompletionOr<void> create_variable(VM& vm, Utf16FlyString const& name, Op::EnvironmentMode mode, bool is_global, bool is_immutable, bool is_strict)
{
    if (mode == Op::EnvironmentMode::Lexical) {
        VERIFY(!is_global);
        auto lexical_environment = vm.lexical_environment();
        // Note: This is papering over an issue where "FunctionDeclarationInstantiation" creates these bindings for us.
        //       Instead of crashing in there, we'll just raise an exception here.
        if (TRY(lexical_environment->has_binding(name))) [[unlikely]]
            return vm.throw_completion<InternalError>(TRY_OR_THROW_OOM(vm, String::formatted("Lexical environment already has binding '{}'", name)));
        return is_immutable
            ? lexical_environment->create_immutable_binding(vm, name, is_strict)
            : lexical_environment->create_mutable_binding(vm, name, is_strict);
    }

    if (!is_global) {
        auto variable_environment = vm.variable_environment();
        return is_immutable
            ? variable_environment->create_immutable_binding(vm, name, is_strict)
            : variable_environment->create_mutable_binding(vm, name, is_strict);
    }

    // NOTE: CreateVariable with m_is_global set to true is expected to only be used in GlobalDeclarationInstantiation currently, which only uses "false" for "can_be_deleted".
    //       The only area that sets "can_be_deleted" to true is EvalDeclarationInstantiation, which is currently fully implemented in C++ and not in Bytecode.
    return as<GlobalEnvironment>(vm.variable_environment())->create_global_var_binding(name, false);
}
// Drains `iterator` (an IteratorRecord cell) into a fresh Array, placing each
// produced value at consecutive indices starting from 0.
inline ThrowCompletionOr<GC::Ref<Array>> iterator_to_array(VM& vm, Value iterator)
{
    auto& iterator_record = static_cast<IteratorRecord&>(iterator.as_cell());
    auto array = MUST(Array::create(*vm.current_realm(), 0));
    for (size_t index = 0;; ++index) {
        auto next_value = TRY(iterator_step_value(vm, iterator_record));
        if (!next_value.has_value())
            return array;
        MUST(array->create_data_property_or_throw(index, next_value.release_value()));
    }
}
// Appends `rhs` to the array literal / argument list being built in `lhs`.
// When `is_spread` is true, rhs is iterated and each produced value is
// appended in order; otherwise rhs itself is appended once.
//
// The spread iteration must be observable to user code, as required by:
// (1) https://tc39.es/ecma262/#sec-runtime-semantics-arrayaccumulation
//     SpreadElement : ... AssignmentExpression
//     (GetIterator on the spread object, IteratorStep/IteratorValue per
//     element, CreateDataPropertyOrThrow at the next index)
// (2) https://tc39.es/ecma262/#sec-runtime-semantics-argumentlistevaluation
//     ArgumentList : [ArgumentList ,] ... AssignmentExpression
//     (GetIterator on the spread object, each IteratorValue appended to the
//     argument list)
//
// Note: We know from codegen, that lhs is a plain array with only indexed properties
inline ThrowCompletionOr<void> append(VM& vm, Value lhs, Value rhs, bool is_spread)
{
    auto& destination = lhs.as_array();
    auto next_index = destination.indexed_properties().array_like_size();

    if (!is_spread) {
        destination.indexed_properties().put(next_index, rhs, default_attributes);
        return {};
    }

    // ...rhs
    TRY(get_iterator_values(vm, rhs, [&next_index, &destination](Value iterator_value) -> Optional<Completion> {
        destination.indexed_properties().put(next_index, iterator_value, default_attributes);
        ++next_index;
        return {};
    }));
    return {};
}
// Builtin iterator over a pre-collected snapshot of enumerable property keys,
// used to drive for..in enumeration (see get_object_property_iterator()).
// Deletions after snapshot time are honored by re-checking has_property()
// before yielding each key (invariant no. 2 of EnumerateObjectProperties).
class JS_API PropertyNameIterator final
    : public Object
    , public BuiltinIterator {
    JS_OBJECT(PropertyNameIterator, Object);
    GC_DECLARE_ALLOCATOR(PropertyNameIterator);
public:
    virtual ~PropertyNameIterator() override = default;
    BuiltinIterator* as_builtin_iterator_if_next_is_not_redefined(Value) override { return this; }
    // Advances the iterator: sets done=true at the end, otherwise stores the
    // next still-present key (as a Value) in `value`.
    ThrowCompletionOr<void> next(VM& vm, bool& done, Value& value) override
    {
        while (true) {
            if (m_iterator == m_properties.end()) {
                done = true;
                return {};
            }
            auto const& entry = *m_iterator;
            // Advance even if we skip or TRY() propagates an error below.
            ScopeGuard remove_first = [&] { ++m_iterator; };
            // If the property is deleted, don't include it (invariant no. 2)
            if (!TRY(m_object->has_property(entry)))
                continue;
            done = false;
            value = entry.to_value(vm);
            return {};
        }
    }
private:
    PropertyNameIterator(JS::Realm& realm, GC::Ref<Object> object, Vector<PropertyKey> properties)
        : Object(realm, nullptr)
        , m_object(object)
        , m_properties(move(properties))
        , m_iterator(m_properties.begin())
    {
    }
    virtual void visit_edges(Visitor& visitor) override
    {
        Base::visit_edges(visitor);
        visitor.visit(m_object);
        // PropertyKeys may hold GC cells (symbols), so visit each one.
        for (auto& key : m_properties)
            key.visit_edges(visitor);
        if (!m_iterator.is_end())
            m_iterator->visit_edges(visitor);
    }
    GC::Ref<Object> m_object;                  // Object being enumerated (has_property re-checks against it).
    Vector<PropertyKey> m_properties;          // Snapshot of enumerable keys, in enumeration order.
    decltype(m_properties.begin()) m_iterator; // Current position within m_properties.
};
GC_DEFINE_ALLOCATOR(PropertyNameIterator);
// 14.7.5.9 EnumerateObjectProperties ( O ), https://tc39.es/ecma262/#sec-enumerate-object-properties
inline ThrowCompletionOr<IteratorRecordImpl> get_object_property_iterator(Interpreter& interpreter, Value value)
{
    // While the spec does provide an algorithm, it allows us to implement it ourselves so long as we meet the following invariants:
    //    1- Returned property keys do not include keys that are Symbols
    //    2- Properties of the target object may be deleted during enumeration. A property that is deleted before it is processed by the iterator's next method is ignored
    //    3- If new properties are added to the target object during enumeration, the newly added properties are not guaranteed to be processed in the active enumeration
    //    4- A property name will be returned by the iterator's next method at most once in any enumeration.
    //    5- Enumerating the properties of the target object includes enumerating properties of its prototype, and the prototype of the prototype, and so on, recursively;
    //       but a property of a prototype is not processed if it has the same name as a property that has already been processed by the iterator's next method.
    //    6- The values of [[Enumerable]] attributes are not considered when determining if a property of a prototype object has already been processed.
    //    7- The enumerable property names of prototype objects must be obtained by invoking EnumerateObjectProperties passing the prototype object as the argument.
    //    8- EnumerateObjectProperties must obtain the own property keys of the target object by calling its [[OwnPropertyKeys]] internal method.
    //    9- Property attributes of the target object must be obtained by calling its [[GetOwnProperty]] internal method
    auto& vm = interpreter.vm();
    // Invariant 3 effectively allows the implementation to ignore newly added keys, and we do so (similar to other implementations).
    auto object = TRY(value.to_object(vm));
    // Note: While the spec doesn't explicitly require these to be ordered, it says that the values should be retrieved via OwnPropertyKeys,
    //       so we just keep the order consistent anyway.
    // First pass: walk the prototype chain once just to size the key vector.
    size_t estimated_properties_count = 0;
    HashTable<GC::Ref<Object>> seen_objects;
    for (auto object_to_check = GC::Ptr { object.ptr() }; object_to_check && !seen_objects.contains(*object_to_check); object_to_check = TRY(object_to_check->internal_get_prototype_of())) {
        seen_objects.set(*object_to_check);
        estimated_properties_count += object_to_check->own_properties_count();
    }
    seen_objects.clear_with_capacity();
    Vector<PropertyKey> properties;
    properties.ensure_capacity(estimated_properties_count);
    // Keys shadowed by a non-enumerable own property must not be re-yielded
    // from the prototype chain (invariant no. 6).
    HashTable<PropertyKey> seen_non_enumerable_properties;
    // seen_properties is built lazily: it is only needed once we start
    // visiting prototype objects, to deduplicate keys (invariant no. 4/5).
    Optional<HashTable<PropertyKey>> seen_properties;
    auto ensure_seen_properties = [&] {
        if (seen_properties.has_value())
            return;
        seen_properties = HashTable<PropertyKey> {};
        seen_properties->ensure_capacity(properties.size());
        for (auto const& property : properties)
            seen_properties->set(property);
    };
    // Collect all keys immediately (invariant no. 5)
    bool in_prototype_chain = false;
    for (auto object_to_check = GC::Ptr { object.ptr() }; object_to_check && !seen_objects.contains(*object_to_check); object_to_check = TRY(object_to_check->internal_get_prototype_of())) {
        seen_objects.set(*object_to_check);
        TRY(object_to_check->for_each_own_property_with_enumerability([&](PropertyKey const& property_key, bool enumerable) -> ThrowCompletionOr<void> {
            if (!enumerable)
                seen_non_enumerable_properties.set(property_key);
            if (in_prototype_chain && enumerable) {
                if (seen_non_enumerable_properties.contains(property_key))
                    return {};
                ensure_seen_properties();
                if (seen_properties->contains(property_key))
                    return {};
            }
            if (enumerable)
                properties.append(property_key);
            if (seen_properties.has_value())
                seen_properties->set(property_key);
            return {};
        }));
        in_prototype_chain = true;
    }
    auto iterator = interpreter.realm().create<PropertyNameIterator>(interpreter.realm(), object, move(properties));
    return IteratorRecordImpl { .done = false, .iterator = iterator, .next_method = js_undefined() };
}
// Produces a human-readable disassembly line for this instruction by
// dispatching on the runtime type tag to the concrete opcode's
// to_byte_string_impl(). Used for bytecode dumps and debugging.
ByteString Instruction::to_byte_string(Bytecode::Executable const& executable) const
{
    // Expands to one switch case per opcode in ENUMERATE_BYTECODE_OPS.
#define __BYTECODE_OP(op)       \
    case Instruction::Type::op: \
        return static_cast<Bytecode::Op::op const&>(*this).to_byte_string_impl(executable);

    switch (type()) {
        ENUMERATE_BYTECODE_OPS(__BYTECODE_OP)
    default:
        // Every valid instruction type is covered above.
        VERIFY_NOT_REACHED();
    }

#undef __BYTECODE_OP
}
}
namespace JS::Bytecode::Op {
// Stamps out execute_impl() for every binary operator that has no dedicated
// fast path below: fetch both operands, call the runtime helper
// (op_snake_case), and store the result in the destination operand.
#define JS_DEFINE_EXECUTE_FOR_COMMON_BINARY_OP(OpTitleCase, op_snake_case)                      \
    ThrowCompletionOr<void> OpTitleCase::execute_impl(Bytecode::Interpreter& interpreter) const \
    {                                                                                           \
        auto& vm = interpreter.vm();                                                            \
        auto lhs = interpreter.get(m_lhs);                                                      \
        auto rhs = interpreter.get(m_rhs);                                                      \
        interpreter.set(m_dst, Value { TRY(op_snake_case(vm, lhs, rhs)) });                     \
        return {};                                                                              \
    }

JS_ENUMERATE_COMMON_BINARY_OPS_WITHOUT_FAST_PATH(JS_DEFINE_EXECUTE_FOR_COMMON_BINARY_OP)
// Add: dst = lhs + rhs, with fast paths for Int32 and double operands.
ThrowCompletionOr<void> Add::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const left = interpreter.get(m_lhs);
    auto const right = interpreter.get(m_rhs);

    if (!left.is_number() || !right.is_number()) [[unlikely]] {
        // Generic path via the runtime add() helper (may throw).
        interpreter.set(m_dst, TRY(add(vm, left, right)));
        return {};
    }

    if (left.is_int32() && right.is_int32()) {
        i32 const a = left.as_i32();
        i32 const b = right.as_i32();
        if (Checked<i32>::addition_would_overflow(a, b)) [[unlikely]] {
            // Widen to i64; the sum of two i32 values always fits.
            interpreter.set(m_dst, Value(static_cast<i64>(a) + static_cast<i64>(b), Value::CannotFitInInt32::Indeed));
        } else {
            interpreter.set(m_dst, Value(a + b));
        }
        return {};
    }

    interpreter.set(m_dst, Value(left.as_double() + right.as_double()));
    return {};
}
// Mul: dst = lhs * rhs, with fast paths for Int32 and double operands,
// including correct negative-zero handling for the Int32 case.
ThrowCompletionOr<void> Mul::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const lhs = interpreter.get(m_lhs);
    auto const rhs = interpreter.get(m_rhs);
    if (lhs.is_number() && rhs.is_number()) [[likely]] {
        if (lhs.is_int32() && rhs.is_int32()) {
            if (!Checked<i32>::multiplication_would_overflow(lhs.as_i32(), rhs.as_i32())) [[likely]] {
                auto lhs_i32 = lhs.as_i32();
                auto rhs_i32 = rhs.as_i32();
                auto result = lhs_i32 * rhs_i32;
                if (result != 0) [[likely]] {
                    interpreter.set(m_dst, Value(result));
                    return {};
                }
                // NB: When the mathematical result is zero, the sign depends on the operand
                // signs. We can determine it directly here instead of widening to double.
                auto is_negative_zero = (lhs_i32 < 0) != (rhs_i32 < 0);
                interpreter.set(m_dst, is_negative_zero ? Value(-0.0) : Value(0));
                return {};
            }
            // Overflowing i32 product: the exact result always fits in i64.
            auto result = static_cast<i64>(lhs.as_i32()) * static_cast<i64>(rhs.as_i32());
            interpreter.set(m_dst, Value(result, Value::CannotFitInInt32::Indeed));
            return {};
        }
        interpreter.set(m_dst, Value(lhs.as_double() * rhs.as_double()));
        return {};
    }
    // Generic slow path via the runtime mul() helper (may throw).
    interpreter.set(m_dst, TRY(mul(vm, lhs, rhs)));
    return {};
}
// Div: dst = lhs / rhs. JS division always produces a Number, so two numeric
// operands divide directly as doubles; everything else goes through div().
ThrowCompletionOr<void> Div::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const numerator = interpreter.get(m_lhs);
    auto const denominator = interpreter.get(m_rhs);

    if (!numerator.is_number() || !denominator.is_number()) [[unlikely]] {
        interpreter.set(m_dst, TRY(div(vm, numerator, denominator)));
        return {};
    }

    interpreter.set(m_dst, Value(numerator.as_double() / denominator.as_double()));
    return {};
}
// Mod: dst = lhs % rhs, with an Int32 fast path that carefully avoids C++ UB
// (division by zero, INT32_MIN % -1) and preserves JS negative-zero results.
ThrowCompletionOr<void> Mod::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const lhs = interpreter.get(m_lhs);
    auto const rhs = interpreter.get(m_rhs);
    if (lhs.is_number() && rhs.is_number()) [[likely]] {
        if (lhs.is_int32() && rhs.is_int32()) {
            auto n = lhs.as_i32();
            auto d = rhs.as_i32();
            // x % 0 is NaN in JS; integer % 0 would be UB in C++.
            if (d == 0) {
                interpreter.set(m_dst, js_nan());
                return {};
            }
            // INT32_MIN % -1 overflows in C++ (UB); the JS result is -0.
            if (n == NumericLimits<i32>::min() && d == -1) {
                interpreter.set(m_dst, Value(-0.0));
                return {};
            }
            auto result = n % d;
            // A zero remainder takes the sign of the dividend, hence -0 for negative n.
            if (result == 0 && n < 0) {
                interpreter.set(m_dst, Value(-0.0));
                return {};
            }
            interpreter.set(m_dst, Value(result));
            return {};
        }
        // Double path: fmod matches the JS remainder semantics for Numbers.
        interpreter.set(m_dst, Value(fmod(lhs.as_double(), rhs.as_double())));
        return {};
    }
    // Generic slow path via the runtime mod() helper (may throw).
    interpreter.set(m_dst, TRY(mod(vm, lhs, rhs)));
    return {};
}
// Sub: dst = lhs - rhs, with fast paths for Int32 and double operands.
ThrowCompletionOr<void> Sub::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const left = interpreter.get(m_lhs);
    auto const right = interpreter.get(m_rhs);

    if (!left.is_number() || !right.is_number()) [[unlikely]] {
        // Generic path via the runtime sub() helper (may throw).
        interpreter.set(m_dst, TRY(sub(vm, left, right)));
        return {};
    }

    if (left.is_int32() && right.is_int32()) {
        i32 const a = left.as_i32();
        i32 const b = right.as_i32();
        if (Checked<i32>::subtraction_would_overflow(a, b)) [[unlikely]] {
            // Widen to i64; the difference of two i32 values always fits.
            interpreter.set(m_dst, Value(static_cast<i64>(a) - static_cast<i64>(b), Value::CannotFitInInt32::Indeed));
        } else {
            interpreter.set(m_dst, Value(a - b));
        }
        return {};
    }

    interpreter.set(m_dst, Value(left.as_double() - right.as_double()));
    return {};
}
// BitwiseXor: dst = lhs ^ rhs, with an Int32 fast path.
ThrowCompletionOr<void> BitwiseXor::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const a = interpreter.get(m_lhs);
    auto const b = interpreter.get(m_rhs);
    if (a.is_int32() && b.is_int32())
        interpreter.set(m_dst, Value(a.as_i32() ^ b.as_i32()));
    else
        interpreter.set(m_dst, TRY(bitwise_xor(vm, a, b)));
    return {};
}
// BitwiseAnd: dst = lhs & rhs, with an Int32 fast path.
ThrowCompletionOr<void> BitwiseAnd::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const a = interpreter.get(m_lhs);
    auto const b = interpreter.get(m_rhs);
    if (a.is_int32() && b.is_int32())
        interpreter.set(m_dst, Value(a.as_i32() & b.as_i32()));
    else
        interpreter.set(m_dst, TRY(bitwise_and(vm, a, b)));
    return {};
}
// ToInt32: dst = ToInt32(value). A value that is already an Int32 passes
// through unchanged; anything else is coerced (which may throw).
ThrowCompletionOr<void> ToInt32::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const input = interpreter.get(m_value);
    if (!input.is_int32()) [[unlikely]] {
        interpreter.set(m_dst, Value(TRY(input.to_i32(vm))));
        return {};
    }
    interpreter.set(m_dst, input);
    return {};
}
// ToString: dst = ToString(value), as a primitive string (may throw).
ThrowCompletionOr<void> ToString::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto string = TRY(interpreter.get(m_value).to_primitive_string(vm));
    interpreter.set(m_dst, Value { string });
    return {};
}
// ToPrimitiveWithStringHint: dst = ToPrimitive(value, hint String) (may throw).
ThrowCompletionOr<void> ToPrimitiveWithStringHint::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const input = interpreter.get(m_value);
    interpreter.set(m_dst, TRY(input.to_primitive(vm, Value::PreferredType::String)));
    return {};
}
// BitwiseOr: dst = lhs | rhs, with an Int32 fast path.
ThrowCompletionOr<void> BitwiseOr::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const a = interpreter.get(m_lhs);
    auto const b = interpreter.get(m_rhs);
    if (a.is_int32() && b.is_int32())
        interpreter.set(m_dst, Value(a.as_i32() | b.as_i32()));
    else
        interpreter.set(m_dst, TRY(bitwise_or(vm, a, b)));
    return {};
}
// UnsignedRightShift: dst = lhs >>> rhs, with an Int32 fast path.
ThrowCompletionOr<void> UnsignedRightShift::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const value = interpreter.get(m_lhs);
    auto const shift = interpreter.get(m_rhs);
    if (value.is_int32() && shift.is_int32()) {
        // Only the low 5 bits of the shift count are significant (count mod 32).
        auto const count = static_cast<u32>(shift.as_i32()) & 31;
        interpreter.set(m_dst, Value(static_cast<u32>(value.as_i32()) >> count));
        return {};
    }
    interpreter.set(m_dst, TRY(unsigned_right_shift(vm, value, shift)));
    return {};
}
// RightShift: dst = lhs >> rhs (sign-propagating), with an Int32 fast path.
ThrowCompletionOr<void> RightShift::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const value = interpreter.get(m_lhs);
    auto const shift = interpreter.get(m_rhs);
    if (value.is_int32() && shift.is_int32()) {
        // Only the low 5 bits of the shift count are significant (count mod 32).
        auto const count = static_cast<u32>(shift.as_i32()) & 31;
        interpreter.set(m_dst, Value(value.as_i32() >> count));
        return {};
    }
    interpreter.set(m_dst, TRY(right_shift(vm, value, shift)));
    return {};
}
// LeftShift: dst = lhs << rhs, with an Int32 fast path.
ThrowCompletionOr<void> LeftShift::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const value = interpreter.get(m_lhs);
    auto const shift = interpreter.get(m_rhs);
    if (value.is_int32() && shift.is_int32()) {
        // Only the low 5 bits of the shift count are significant (count mod 32).
        auto const count = static_cast<u32>(shift.as_i32()) & 31;
        interpreter.set(m_dst, Value(value.as_i32() << count));
        return {};
    }
    interpreter.set(m_dst, TRY(left_shift(vm, value, shift)));
    return {};
}
// LessThan: dst = (lhs < rhs), with Int32 and double fast paths.
ThrowCompletionOr<void> LessThan::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const left = interpreter.get(m_lhs);
    auto const right = interpreter.get(m_rhs);

    if (!left.is_number() || !right.is_number()) [[unlikely]] {
        // Generic path handles coercion (may throw).
        interpreter.set(m_dst, Value { TRY(less_than(vm, left, right)) });
        return {};
    }

    bool result;
    if (left.is_int32() && right.is_int32())
        result = left.as_i32() < right.as_i32();
    else
        result = left.as_double() < right.as_double();
    interpreter.set(m_dst, Value(result));
    return {};
}
// LessThanEquals: dst = (lhs <= rhs), with Int32 and double fast paths.
ThrowCompletionOr<void> LessThanEquals::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const left = interpreter.get(m_lhs);
    auto const right = interpreter.get(m_rhs);

    if (!left.is_number() || !right.is_number()) [[unlikely]] {
        // Generic path handles coercion (may throw).
        interpreter.set(m_dst, Value { TRY(less_than_equals(vm, left, right)) });
        return {};
    }

    bool result;
    if (left.is_int32() && right.is_int32())
        result = left.as_i32() <= right.as_i32();
    else
        result = left.as_double() <= right.as_double();
    interpreter.set(m_dst, Value(result));
    return {};
}
// GreaterThan: dst = (lhs > rhs), with Int32 and double fast paths.
ThrowCompletionOr<void> GreaterThan::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const left = interpreter.get(m_lhs);
    auto const right = interpreter.get(m_rhs);

    if (!left.is_number() || !right.is_number()) [[unlikely]] {
        // Generic path handles coercion (may throw).
        interpreter.set(m_dst, Value { TRY(greater_than(vm, left, right)) });
        return {};
    }

    bool result;
    if (left.is_int32() && right.is_int32())
        result = left.as_i32() > right.as_i32();
    else
        result = left.as_double() > right.as_double();
    interpreter.set(m_dst, Value(result));
    return {};
}
// GreaterThanEquals: dst = (lhs >= rhs), with Int32 and double fast paths.
ThrowCompletionOr<void> GreaterThanEquals::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const left = interpreter.get(m_lhs);
    auto const right = interpreter.get(m_rhs);

    if (!left.is_number() || !right.is_number()) [[unlikely]] {
        // Generic path handles coercion (may throw).
        interpreter.set(m_dst, Value { TRY(greater_than_equals(vm, left, right)) });
        return {};
    }

    bool result;
    if (left.is_int32() && right.is_int32())
        result = left.as_i32() >= right.as_i32();
    else
        result = left.as_double() >= right.as_double();
    interpreter.set(m_dst, Value(result));
    return {};
}
// Typeof: dst = typeof src.
void Typeof::execute_impl(Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const value = interpreter.get(src());
    interpreter.set(dst(), value.typeof_(vm));
}
// Not: dst = !ToBoolean(src).
void Not::execute_impl(Interpreter& interpreter) const
{
    auto const truthy = interpreter.get(src()).to_boolean();
    interpreter.set(dst(), Value(!truthy));
}
// Stamps out execute_impl() for every common unary operator: fetch the source
// operand, call the runtime helper (op_snake_case), and store the result.
#define JS_DEFINE_COMMON_UNARY_OP(OpTitleCase, op_snake_case)                                   \
    ThrowCompletionOr<void> OpTitleCase::execute_impl(Bytecode::Interpreter& interpreter) const \
    {                                                                                           \
        auto& vm = interpreter.vm();                                                            \
        interpreter.set(dst(), TRY(op_snake_case(vm, interpreter.get(src()))));                 \
        return {};                                                                              \
    }

JS_ENUMERATE_COMMON_UNARY_OPS(JS_DEFINE_COMMON_UNARY_OP)
// NewArray: materializes an array literal from m_element_count operand registers.
void NewArray::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto array = MUST(Array::create(interpreter.realm(), m_element_count));
    auto& storage = array->indexed_properties();
    for (size_t index = 0; index < m_element_count; ++index)
        storage.put(index, interpreter.get(m_elements[index]), default_attributes);
    interpreter.set(dst(), array);
}
// NewPrimitiveArray: materializes an array literal whose elements are
// constant values baked directly into the instruction.
void NewPrimitiveArray::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto array = MUST(Array::create(interpreter.realm(), m_element_count));
    auto& storage = array->indexed_properties();
    for (size_t index = 0; index < m_element_count; ++index)
        storage.put(index, m_elements[index], default_attributes);
    interpreter.set(dst(), array);
}
// 13.2.8.4 GetTemplateObject ( templateLiteral ), https://tc39.es/ecma262/#sec-gettemplateobject
// NOTE: Instead of a realm-wide [[TemplateMap]] keyed by parse node, a
// per-instruction cache serves the same "one template object per site" purpose.
void GetTemplateObject::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto& cache = *bit_cast<TemplateObjectCache*>(m_cache);

    // 1. Let realm be the current Realm Record.
    auto& realm = *vm.current_realm();

    // 2. Let templateRegistry be realm.[[TemplateMap]].
    // 3. For each element e of templateRegistry, do
    //    a. If e.[[Site]] is the same Parse Node as templateLiteral, then
    //       i. Return e.[[Array]].
    if (cache.cached_template_object) {
        interpreter.set(dst(), cache.cached_template_object);
        return;
    }

    // 4. Let rawStrings be the TemplateStrings of templateLiteral with argument true.
    // 5. Assert: rawStrings is a List of Strings.
    // 6. Let cookedStrings be the TemplateStrings of templateLiteral with argument false.
    // NOTE: This has already been done.

    // 7. Let count be the number of elements in the List cookedStrings.
    // NOTE: m_strings contains [cooked_0, ..., cooked_n, raw_0, ..., raw_n]
    // 8. Assert: count ≤ 2**32 - 1.
    // NOTE: Done by having count be a u32.
    u32 count = m_strings_count / 2;

    // 9. Let template be ! ArrayCreate(count).
    auto template_object = MUST(Array::create(realm, count));

    // 10. Let rawObj be ! ArrayCreate(count).
    auto raw_object = MUST(Array::create(realm, count));

    // 12. Repeat, while index < count,
    for (size_t index = 0; index < count; index++) {
        // a. Let prop be ! ToString(𝔽(index)).
        // b. Let cookedValue be cookedStrings[index].
        auto cooked_value = interpreter.get(m_strings[index]);

        // c. Perform ! DefinePropertyOrThrow(template, prop, PropertyDescriptor { [[Value]]: cookedValue, [[Writable]]: false, [[Enumerable]]: true, [[Configurable]]: false }).
        template_object->indexed_properties().put(index, cooked_value, Attribute::Enumerable);

        // d. Let rawValue be the String value rawStrings[index].
        // NOTE: The raw strings live in the second half of m_strings.
        auto raw_value = interpreter.get(m_strings[count + index]);

        // e. Perform ! DefinePropertyOrThrow(rawObj, prop, PropertyDescriptor { [[Value]]: rawValue, [[Writable]]: false, [[Enumerable]]: true, [[Configurable]]: false }).
        raw_object->indexed_properties().put(index, raw_value, Attribute::Enumerable);

        // f. Set index to index + 1.
    }

    // 13. Perform ! SetIntegrityLevel(rawObj, FROZEN).
    MUST(raw_object->set_integrity_level(Object::IntegrityLevel::Frozen));

    // 14. Perform ! DefinePropertyOrThrow(template, "raw", PropertyDescriptor { [[Value]]: rawObj, [[Writable]]: false, [[Enumerable]]: false, [[Configurable]]: false }).
    template_object->define_direct_property(vm.names.raw, raw_object, PropertyAttributes {});

    // 15. Perform ! SetIntegrityLevel(template, FROZEN).
    MUST(template_object->set_integrity_level(Object::IntegrityLevel::Frozen));

    // 16. Append the Record { [[Site]]: templateLiteral, [[Array]]: template } to realm.[[TemplateMap]].
    cache.cached_template_object = template_object;

    // 17. Return template.
    interpreter.set(dst(), template_object);
}
// NewArrayWithLength: dst = new Array(length). The length operand is expected
// to already hold a Number (as_double() is called without a type check).
ThrowCompletionOr<void> NewArrayWithLength::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto const length_value = interpreter.get(m_array_length);
    auto const length = static_cast<u64>(length_value.as_double());
    interpreter.set(m_dst, TRY(Array::create(interpreter.realm(), length)));
    return {};
}
// AddPrivateName: registers a private name in the running execution context's
// private environment.
void AddPrivateName::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& private_environment = *interpreter.vm().running_execution_context().private_environment;
    private_environment.add_private_name(interpreter.get_identifier(m_name));
}
// ArrayAppend: appends src to the array in dst, spreading it when m_is_spread.
ThrowCompletionOr<void> ArrayAppend::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto array = interpreter.get(dst());
    auto value = interpreter.get(src());
    return append(vm, array, value, m_is_spread);
}
// ImportCall: dynamic import() — dst = import(specifier, options).
ThrowCompletionOr<void> ImportCall::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto specifier = interpreter.get(m_specifier);
    auto options = interpreter.get(m_options);
    auto result = TRY(perform_import_call(vm, specifier, options));
    interpreter.set(dst(), result);
    return {};
}
// IteratorToArray: drains the iterator described by the three operand
// registers and collects every produced value into a fresh Array in dst.
ThrowCompletionOr<void> IteratorToArray::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    // Reassemble an iterator record from the operand registers.
    auto& iterator_object = interpreter.get(m_iterator_object).as_object();
    auto iterator_next_method = interpreter.get(m_iterator_next_method);
    auto iterator_done_property = interpreter.get(m_iterator_done_property).as_bool();
    IteratorRecordImpl iterator_record { .done = iterator_done_property, .iterator = iterator_object, .next_method = iterator_next_method };

    auto array = MUST(Array::create(*vm.current_realm(), 0));
    size_t index = 0;

    while (true) {
        // iterator_step_value() yields an empty Optional once the iterator is done.
        auto value = TRY(iterator_step_value(vm, iterator_record));
        if (!value.has_value())
            break;

        MUST(array->create_data_property_or_throw(index, value.release_value()));
        index++;
    }

    interpreter.set(dst(), array);
    return {};
}
// NewObject: dst = {}. When a shape cache exists and holds a shape (populated
// by CacheObjectShape), create the object directly from that premade shape.
void NewObject::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto& realm = *vm.current_realm();

    if (m_cache) {
        auto& cache = *bit_cast<ObjectShapeCache*>(m_cache);
        if (auto* cached_shape = cache.shape.ptr()) {
            interpreter.set(dst(), Object::create_with_premade_shape(*cached_shape));
            return;
        }
    }

    interpreter.set(dst(), Object::create(realm, realm.intrinsics().object_prototype()));
}
// NewObjectWithNoPrototype: dst = Object.create(null).
void NewObjectWithNoPrototype::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& realm = *interpreter.vm().current_realm();
    interpreter.set(dst(), Object::create(realm, nullptr));
}
// CacheObjectShape: records the shape of the object operand into this
// instruction's cache (first execution only) so NewObject can reuse it.
void CacheObjectShape::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& cache = *bit_cast<ObjectShapeCache*>(m_cache);
    if (cache.shape)
        return;
    cache.shape = &interpreter.get(m_object).as_object().shape();
}
// Slow path for InitObjectLiteralProperty: defines the property the generic
// way, then (when possible) records its storage offset in the shape cache so
// later executions can take the fast put_direct() path.
COLD static void init_object_literal_property_slow(Object& object, PropertyKey const& property_key, Value value, ObjectShapeCache& cache, u32 property_slot)
{
    object.define_direct_property(property_key, value, JS::Attribute::Enumerable | JS::Attribute::Writable | JS::Attribute::Configurable);
    // Cache the property offset for future fast-path use
    // Note: lookup may fail if the shape is in dictionary mode or for other edge cases.
    // We only cache if we're not in dictionary mode and the lookup succeeds.
    if (!object.shape().is_dictionary()) {
        auto metadata = object.shape().lookup(property_key);
        if (metadata.has_value()) {
            // Grow the offset table on demand; property_slot indexes it directly.
            if (property_slot >= cache.property_offsets.size())
                cache.property_offsets.resize(property_slot + 1);
            cache.property_offsets[property_slot] = metadata->offset;
        }
    }
}
// InitObjectLiteralProperty: defines one property of an object literal.
// Uses a per-executable shape cache: if the object's current shape matches
// the cached one, the value is written straight into the cached slot offset.
void InitObjectLiteralProperty::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& object = interpreter.get(m_object).as_object();
    auto value = interpreter.get(m_src);
    auto& cache = interpreter.current_executable().object_shape_caches[m_shape_cache_index];

    // Fast path: if we have a cached shape and it matches, write directly to the cached offset
    auto cached_shape = cache.shape.ptr();
    if (cached_shape && &object.shape() == cached_shape && m_property_slot < cache.property_offsets.size()) {
        object.put_direct(cache.property_offsets[m_property_slot], value);
        return;
    }

    // Miss: define generically and let the slow path (re)populate the cache.
    auto const& property_key = interpreter.current_executable().get_property_key(m_property);
    init_object_literal_property_slow(object, property_key, value, cache, m_property_slot);
}
// NewRegExp: dst = a fresh RegExp object built from the executable's regex
// table entry (pre-parsed pattern) plus its source and flags strings.
// NOTE: This block previously had git commit-log residue interleaved with the
// call expression, which made it non-compiling; the residue has been removed.
void NewRegExp::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& executable = interpreter.current_executable();
    interpreter.set(dst(),
        new_regexp(
            interpreter.vm(),
            executable.regex_table->get(m_regex_index),
            executable.get_string(m_source_index),
            executable.get_string(m_flags_index)));
}
// NewReferenceError: dst = new ReferenceError(message from the string table).
COLD void NewReferenceError::execute_impl(Interpreter& interpreter) const
{
    auto& realm = *interpreter.vm().current_realm();
    auto const& message = interpreter.current_executable().get_string(m_error_string);
    interpreter.set(dst(), ReferenceError::create(realm, message));
}
// NewTypeError: dst = new TypeError(message from the string table).
COLD void NewTypeError::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& realm = *interpreter.vm().current_realm();
    auto const& message = interpreter.current_executable().get_string(m_error_string);
    interpreter.set(dst(), TypeError::create(realm, message));
}
// CopyObjectExcludingProperties: creates a fresh plain object and copies the
// source object's data properties into it, skipping the listed excluded keys.
ThrowCompletionOr<void> CopyObjectExcludingProperties::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto& realm = *vm.current_realm();

    auto from_object = interpreter.get(m_from_object);

    auto to_object = Object::create(realm, realm.intrinsics().object_prototype());

    HashTable<PropertyKey> excluded_names;
    for (size_t i = 0; i < m_excluded_names_count; ++i) {
        // Each excluded-name operand may be any value; coerce it to a property key (can throw).
        excluded_names.set(TRY(interpreter.get(m_excluded_names[i]).to_property_key(vm)));
    }

    TRY(to_object->copy_data_properties(vm, from_object, excluded_names));

    interpreter.set(dst(), to_object);
    return {};
}
// ConcatString: dst = dst + ToString(src). dst is known to already hold a
// string; src is coerced to a primitive string (may throw).
ThrowCompletionOr<void> ConcatString::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto suffix = TRY(interpreter.get(src()).to_primitive_string(vm));
    auto& prefix = interpreter.get(dst()).as_string();
    interpreter.set(dst(), PrimitiveString::create(vm, prefix, suffix));
    return {};
}
// Whether the generator has proven that the binding being read is already
// initialized (i.e. cannot be in the temporal dead zone), allowing
// get_binding() to skip the initialization check.
enum class BindingIsKnownToBeInitialized {
    No,
    Yes,
};
template<BindingIsKnownToBeInitialized binding_is_known_to_be_initialized>
static ThrowCompletionOr<void> get_binding(Interpreter& interpreter, Operand dst, IdentifierTableIndex identifier, Strict strict, EnvironmentCoordinate& cache)
{
auto& vm = interpreter.vm();
if (cache.is_valid()) [[likely]] {
auto const* environment = interpreter.running_execution_context().lexical_environment.ptr();
LibJS: Limit eval() deoptimization to the containing function scope Previously, when direct eval() was called, we would mark the entire environment chain as "permanently screwed by eval", disabling variable access caching all the way up to the global scope. This was overly conservative. According to the ECMAScript specification, a sloppy direct eval() can only inject var declarations into its containing function's variable environment - it cannot inject variables into parent function scopes. This patch makes two changes: 1. Stop propagating the "screwed by eval" flag at function boundaries. When set_permanently_screwed_by_eval() hits a FunctionEnvironment or GlobalEnvironment, it no longer continues to outer environments. 2. Check each environment during cache lookup traversal. If any environment in the path is marked as screwed, we bail to the slow path. This catches the case where we're inside a function with eval and have a cached coordinate pointing to an outer scope. The second change is necessary because eval can create local variables that shadow outer bindings. When looking up a variable from inside a function that called eval, we can't trust cached coordinates that point to outer scopes, since eval may have created a closer binding. This improves performance for code with nested functions where an inner function uses eval but parent functions perform many variable accesses. The parent functions can now use cached environment coordinates. All 29 new tests verify behavior matches V8.
2026-01-26 20:40:05 +01:00
for (size_t i = 0; i < cache.hops; ++i) {
if (environment->is_permanently_screwed_by_eval()) [[unlikely]]
goto slow_path;
environment = environment->outer_environment();
LibJS: Limit eval() deoptimization to the containing function scope Previously, when direct eval() was called, we would mark the entire environment chain as "permanently screwed by eval", disabling variable access caching all the way up to the global scope. This was overly conservative. According to the ECMAScript specification, a sloppy direct eval() can only inject var declarations into its containing function's variable environment - it cannot inject variables into parent function scopes. This patch makes two changes: 1. Stop propagating the "screwed by eval" flag at function boundaries. When set_permanently_screwed_by_eval() hits a FunctionEnvironment or GlobalEnvironment, it no longer continues to outer environments. 2. Check each environment during cache lookup traversal. If any environment in the path is marked as screwed, we bail to the slow path. This catches the case where we're inside a function with eval and have a cached coordinate pointing to an outer scope. The second change is necessary because eval can create local variables that shadow outer bindings. When looking up a variable from inside a function that called eval, we can't trust cached coordinates that point to outer scopes, since eval may have created a closer binding. This improves performance for code with nested functions where an inner function uses eval but parent functions perform many variable accesses. The parent functions can now use cached environment coordinates. All 29 new tests verify behavior matches V8.
2026-01-26 20:40:05 +01:00
}
if (!environment->is_permanently_screwed_by_eval()) [[likely]] {
Value value;
if constexpr (binding_is_known_to_be_initialized == BindingIsKnownToBeInitialized::No) {
value = TRY(static_cast<DeclarativeEnvironment const&>(*environment).get_binding_value_direct(vm, cache.index));
} else {
value = static_cast<DeclarativeEnvironment const&>(*environment).get_initialized_binding_value_direct(cache.index);
}
interpreter.set(dst, value);
return {};
}
LibJS: Limit eval() deoptimization to the containing function scope Previously, when direct eval() was called, we would mark the entire environment chain as "permanently screwed by eval", disabling variable access caching all the way up to the global scope. This was overly conservative. According to the ECMAScript specification, a sloppy direct eval() can only inject var declarations into its containing function's variable environment - it cannot inject variables into parent function scopes. This patch makes two changes: 1. Stop propagating the "screwed by eval" flag at function boundaries. When set_permanently_screwed_by_eval() hits a FunctionEnvironment or GlobalEnvironment, it no longer continues to outer environments. 2. Check each environment during cache lookup traversal. If any environment in the path is marked as screwed, we bail to the slow path. This catches the case where we're inside a function with eval and have a cached coordinate pointing to an outer scope. The second change is necessary because eval can create local variables that shadow outer bindings. When looking up a variable from inside a function that called eval, we can't trust cached coordinates that point to outer scopes, since eval may have created a closer binding. This improves performance for code with nested functions where an inner function uses eval but parent functions perform many variable accesses. The parent functions can now use cached environment coordinates. All 29 new tests verify behavior matches V8.
2026-01-26 20:40:05 +01:00
slow_path:
cache = {};
}
auto& executable = interpreter.current_executable();
auto reference = TRY(vm.resolve_binding(executable.get_identifier(identifier), strict));
if (reference.environment_coordinate().has_value())
cache = reference.environment_coordinate().value();
    // The cache has been refreshed (when possible); read through the generic reference.
interpreter.set(dst, TRY(reference.get_value(vm)));
return {};
}
ThrowCompletionOr<void> GetBinding::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // The binding may still be uninitialized (TDZ), so the helper must perform
    // the initialization check before producing a value.
    constexpr auto initialization_state = BindingIsKnownToBeInitialized::No;
    return get_binding<initialization_state>(interpreter, m_dst, m_identifier, strict(), m_cache);
}
ThrowCompletionOr<void> GetInitializedBinding::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // The generator has proven this binding is initialized, so the helper can
    // skip the TDZ check entirely.
    constexpr auto initialization_state = BindingIsKnownToBeInitialized::Yes;
    return get_binding<initialization_state>(interpreter, m_dst, m_identifier, strict(), m_cache);
}
ThrowCompletionOr<void> GetCalleeAndThisFromEnvironment::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Resolve both the callee and its |this| value in one environment walk,
    // then store them into their respective destination operands.
    auto const& name = interpreter.get_identifier(m_identifier);
    auto result = TRY(get_callee_and_this_from_environment(interpreter, name, strict(), m_cache));
    interpreter.set(m_callee, result.callee);
    interpreter.set(m_this_value, result.this_value);
    return {};
}
ThrowCompletionOr<void> GetGlobal::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Global reads go through a dedicated inline cache (GlobalVariableCache).
    auto& cache = *bit_cast<GlobalVariableCache*>(m_cache);
    auto value = TRY(get_global(interpreter, m_identifier, strict(), cache));
    interpreter.set(dst(), value);
    return {};
}
ThrowCompletionOr<void> SetGlobal::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Stores m_src into a global (or module-scope) binding. A GlobalVariableCache
    // remembers either a property offset on the global object (var-style bindings)
    // or a binding index in the declarative/module environment (lexical bindings),
    // so repeated stores can skip the full lookup.
    auto& vm = interpreter.vm();
    auto& binding_object = interpreter.global_object();
    auto& declarative_record = interpreter.global_declarative_environment();
    auto& cache = *bit_cast<GlobalVariableCache*>(m_cache);
    auto& shape = binding_object.shape();
    auto src = interpreter.get(m_src);
    // The serial number changes whenever the global declarative environment's
    // set of bindings changes, invalidating every cached fast path below.
    if (cache.environment_serial_number == declarative_record.environment_serial_number()) {
        // OPTIMIZATION: For global var bindings, if the shape of the global object hasn't changed,
        // we can use the cached property offset.
        if (&shape == cache.entries[0].shape && (!shape.is_dictionary() || shape.dictionary_generation() == cache.entries[0].shape_dictionary_generation)) {
            auto value = binding_object.get_direct(cache.entries[0].property_offset);
            if (value.is_accessor())
                TRY(call(vm, value.as_accessor().setter(), &binding_object, src));
            else
                binding_object.put_direct(cache.entries[0].property_offset, src);
            return {};
        }
        // OPTIMIZATION: For global lexical bindings, if the global declarative environment hasn't changed,
        // we can use the cached environment binding index.
        if (cache.has_environment_binding_index) {
            if (cache.in_module_environment) {
                auto module = vm.running_execution_context().script_or_module.get_pointer<GC::Ref<Module>>();
                TRY((*module)->environment()->set_mutable_binding_direct(vm, cache.environment_binding_index, src, strict() == Strict::Yes));
            } else {
                TRY(declarative_record.set_mutable_binding_direct(vm, cache.environment_binding_index, src, strict() == Strict::Yes));
            }
            return {};
        }
    }
    // Slow path: refresh the cache's serial number, then perform a full lookup.
    cache.environment_serial_number = declarative_record.environment_serial_number();
    auto& identifier = interpreter.get_identifier(m_identifier);
    if (auto* module = vm.running_execution_context().script_or_module.get_pointer<GC::Ref<Module>>()) {
        // NOTE: GetGlobal is used to access variables stored in the module environment and global environment.
        // The module environment is checked first since it precedes the global environment in the environment chain.
        auto& module_environment = *(*module)->environment();
        Optional<size_t> index;
        if (TRY(module_environment.has_binding(identifier, &index))) {
            if (index.has_value()) {
                // Remember the binding index so the next store can use the
                // direct-index fast path above.
                cache.environment_binding_index = static_cast<u32>(index.value());
                cache.has_environment_binding_index = true;
                cache.in_module_environment = true;
                return TRY(module_environment.set_mutable_binding_direct(vm, index.value(), src, strict() == Strict::Yes));
            }
            return TRY(module_environment.set_mutable_binding(vm, identifier, src, strict() == Strict::Yes));
        }
    }
    Optional<size_t> offset;
    if (TRY(declarative_record.has_binding(identifier, &offset))) {
        cache.environment_binding_index = static_cast<u32>(offset.value());
        cache.has_environment_binding_index = true;
        cache.in_module_environment = false;
        TRY(declarative_record.set_mutable_binding(vm, identifier, src, strict() == Strict::Yes));
        return {};
    }
    if (TRY(binding_object.has_property(identifier))) {
        CacheableSetPropertyMetadata cacheable_metadata;
        auto success = TRY(binding_object.internal_set(identifier, src, &binding_object, &cacheable_metadata));
        if (!success && strict() == Strict::Yes) [[unlikely]] {
            // Note: Nothing like this in the spec, this is here to produce nicer errors instead of the generic one thrown by Object::set().
            auto property_or_error = binding_object.internal_get_own_property(identifier);
            if (!property_or_error.is_error()) {
                auto property = property_or_error.release_value();
                if (property.has_value() && !property->writable.value_or(true)) {
                    return vm.throw_completion<TypeError>(ErrorType::DescWriteNonWritable, identifier);
                }
            }
            return vm.throw_completion<TypeError>(ErrorType::ObjectSetReturnedFalse);
        }
        // Remember the property offset so the next store can use the
        // shape-checked fast path above.
        if (cacheable_metadata.type == CacheableSetPropertyMetadata::Type::ChangeOwnProperty) {
            cache.entries[0].shape = shape;
            cache.entries[0].property_offset = cacheable_metadata.property_offset.value();
            if (shape.is_dictionary()) {
                cache.entries[0].shape_dictionary_generation = shape.dictionary_generation();
            }
        }
        return {};
    }
    // Nothing matched; fall back to a generic reference write.
    auto reference = TRY(vm.resolve_binding(identifier, strict(), &declarative_record));
    TRY(reference.put_value(vm, src));
    return {};
}
COLD ThrowCompletionOr<void> DeleteVariable::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // `delete identifier`: resolve the binding and attempt to remove it,
    // storing the boolean success result in the destination operand.
    auto& vm = interpreter.vm();
    auto reference = TRY(vm.resolve_binding(interpreter.get_identifier(m_identifier), strict()));
    auto deleted = TRY(reference.delete_(vm));
    interpreter.set(dst(), Value(deleted));
    return {};
}
void CreateLexicalEnvironment::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Spawn a fresh declarative environment under the given parent and make it
    // the running context's lexical environment.
    auto& parent_environment = as<Environment>(interpreter.get(m_parent).as_cell());
    auto new_environment = new_declarative_environment(parent_environment);
    new_environment->ensure_capacity(m_capacity);
    interpreter.set(m_dst, new_environment);
    interpreter.running_execution_context().lexical_environment = new_environment;
}
void CreatePrivateEnvironment::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Push a new private environment whose outer environment is the current one.
    auto& vm = interpreter.vm();
    auto& context = vm.running_execution_context();
    context.private_environment = new_private_environment(vm, context.private_environment);
}
void CreateVariableEnvironment::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // A single new declarative environment becomes both the variable and the
    // lexical environment of the running context.
    auto& context = interpreter.running_execution_context();
    auto environment = new_declarative_environment(*context.lexical_environment);
    environment->ensure_capacity(m_capacity);
    context.variable_environment = environment;
    context.lexical_environment = environment;
}
COLD ThrowCompletionOr<void> EnterObjectEnvironment::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Wrap the operand in an object environment (with-environment flag set)
    // and make it the current lexical environment.
    auto& context = interpreter.running_execution_context();
    auto object = TRY(interpreter.get(m_object).to_object(interpreter.vm()));
    auto environment = new_object_environment(*object, true, context.lexical_environment);
    interpreter.set(m_dst, environment);
    context.lexical_environment = environment;
    return {};
}
COLD void Catch::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Hand the in-flight exception to the destination operand via the
    // interpreter (see Interpreter::catch_exception).
    interpreter.catch_exception(dst());
}
ThrowCompletionOr<void> CreateVariable::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Declare a new binding with the mode and flags baked into this instruction.
    auto& vm = interpreter.vm();
    auto const& name = interpreter.get_identifier(m_identifier);
    return create_variable(vm, name, m_mode, m_is_global, m_is_immutable, m_is_strict);
}
void CreateRestParams::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Collect all arguments past the rest index into a fresh array.
    auto& context = interpreter.running_execution_context();
    auto rest_array = MUST(Array::create(interpreter.realm(), 0));
    for (size_t i = m_rest_index; i < context.passed_argument_count; ++i)
        rest_array->indexed_properties().append(context.arguments[i]);
    interpreter.set(m_dst, rest_array);
}
void CreateArguments::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto& context = interpreter.running_execution_context();
    auto const& function = context.function;
    auto const& environment = context.lexical_environment;
    auto passed_arguments = ReadonlySpan<Value> { context.arguments.data(), context.passed_argument_count };
    // Mapped arguments objects stay connected to the parameter bindings;
    // unmapped ones are a plain snapshot of the passed values.
    Object* arguments_object = nullptr;
    if (m_kind == ArgumentsKind::Mapped) {
        auto const& ecma_function = static_cast<ECMAScriptFunctionObject const&>(*function);
        arguments_object = create_mapped_arguments_object(vm, *function, ecma_function.parameter_names_for_mapped_arguments(), passed_arguments, *environment);
    } else {
        arguments_object = create_unmapped_arguments_object(vm, passed_arguments);
    }
    // Either store the object in a destination operand, or bind it as the
    // `arguments` identifier in the current environment.
    if (m_dst.has_value()) {
        interpreter.set(*m_dst, arguments_object);
        return;
    }
    auto const& arguments_name = vm.names.arguments.as_string();
    if (m_is_immutable)
        MUST(environment->create_immutable_binding(vm, arguments_name, false));
    else
        MUST(environment->create_mutable_binding(vm, arguments_name, false));
    MUST(environment->initialize_binding(vm, arguments_name, arguments_object, Environment::InitializeBindingHint::Normal));
}
template<EnvironmentMode environment_mode, BindingInitializationMode initialization_mode>
static ThrowCompletionOr<void> initialize_or_set_binding(Interpreter& interpreter, IdentifierTableIndex identifier_index, Strict strict, Value value, EnvironmentCoordinate& cache)
{
auto& vm = interpreter.vm();
auto* environment = environment_mode == EnvironmentMode::Lexical
? interpreter.running_execution_context().lexical_environment.ptr()
: interpreter.running_execution_context().variable_environment.ptr();
if (cache.is_valid()) [[likely]] {
LibJS: Limit eval() deoptimization to the containing function scope Previously, when direct eval() was called, we would mark the entire environment chain as "permanently screwed by eval", disabling variable access caching all the way up to the global scope. This was overly conservative. According to the ECMAScript specification, a sloppy direct eval() can only inject var declarations into its containing function's variable environment - it cannot inject variables into parent function scopes. This patch makes two changes: 1. Stop propagating the "screwed by eval" flag at function boundaries. When set_permanently_screwed_by_eval() hits a FunctionEnvironment or GlobalEnvironment, it no longer continues to outer environments. 2. Check each environment during cache lookup traversal. If any environment in the path is marked as screwed, we bail to the slow path. This catches the case where we're inside a function with eval and have a cached coordinate pointing to an outer scope. The second change is necessary because eval can create local variables that shadow outer bindings. When looking up a variable from inside a function that called eval, we can't trust cached coordinates that point to outer scopes, since eval may have created a closer binding. This improves performance for code with nested functions where an inner function uses eval but parent functions perform many variable accesses. The parent functions can now use cached environment coordinates. All 29 new tests verify behavior matches V8.
2026-01-26 20:40:05 +01:00
for (size_t i = 0; i < cache.hops; ++i) {
if (environment->is_permanently_screwed_by_eval()) [[unlikely]]
goto slow_path;
environment = environment->outer_environment();
LibJS: Limit eval() deoptimization to the containing function scope Previously, when direct eval() was called, we would mark the entire environment chain as "permanently screwed by eval", disabling variable access caching all the way up to the global scope. This was overly conservative. According to the ECMAScript specification, a sloppy direct eval() can only inject var declarations into its containing function's variable environment - it cannot inject variables into parent function scopes. This patch makes two changes: 1. Stop propagating the "screwed by eval" flag at function boundaries. When set_permanently_screwed_by_eval() hits a FunctionEnvironment or GlobalEnvironment, it no longer continues to outer environments. 2. Check each environment during cache lookup traversal. If any environment in the path is marked as screwed, we bail to the slow path. This catches the case where we're inside a function with eval and have a cached coordinate pointing to an outer scope. The second change is necessary because eval can create local variables that shadow outer bindings. When looking up a variable from inside a function that called eval, we can't trust cached coordinates that point to outer scopes, since eval may have created a closer binding. This improves performance for code with nested functions where an inner function uses eval but parent functions perform many variable accesses. The parent functions can now use cached environment coordinates. All 29 new tests verify behavior matches V8.
2026-01-26 20:40:05 +01:00
}
if (!environment->is_permanently_screwed_by_eval()) [[likely]] {
if constexpr (initialization_mode == BindingInitializationMode::Initialize) {
TRY(static_cast<DeclarativeEnvironment&>(*environment).initialize_binding_direct(vm, cache.index, value, Environment::InitializeBindingHint::Normal));
} else {
TRY(static_cast<DeclarativeEnvironment&>(*environment).set_mutable_binding_direct(vm, cache.index, value, strict == Strict::Yes));
}
return {};
}
LibJS: Limit eval() deoptimization to the containing function scope Previously, when direct eval() was called, we would mark the entire environment chain as "permanently screwed by eval", disabling variable access caching all the way up to the global scope. This was overly conservative. According to the ECMAScript specification, a sloppy direct eval() can only inject var declarations into its containing function's variable environment - it cannot inject variables into parent function scopes. This patch makes two changes: 1. Stop propagating the "screwed by eval" flag at function boundaries. When set_permanently_screwed_by_eval() hits a FunctionEnvironment or GlobalEnvironment, it no longer continues to outer environments. 2. Check each environment during cache lookup traversal. If any environment in the path is marked as screwed, we bail to the slow path. This catches the case where we're inside a function with eval and have a cached coordinate pointing to an outer scope. The second change is necessary because eval can create local variables that shadow outer bindings. When looking up a variable from inside a function that called eval, we can't trust cached coordinates that point to outer scopes, since eval may have created a closer binding. This improves performance for code with nested functions where an inner function uses eval but parent functions perform many variable accesses. The parent functions can now use cached environment coordinates. All 29 new tests verify behavior matches V8.
2026-01-26 20:40:05 +01:00
slow_path:
cache = {};
}
auto reference = TRY(vm.resolve_binding(interpreter.get_identifier(identifier_index), strict, environment));
if (reference.environment_coordinate().has_value())
cache = reference.environment_coordinate().value();
if constexpr (initialization_mode == BindingInitializationMode::Initialize) {
TRY(reference.initialize_referenced_binding(vm, value));
} else if (initialization_mode == BindingInitializationMode::Set) {
TRY(reference.put_value(vm, value));
}
return {};
}
ThrowCompletionOr<void> InitializeLexicalBinding::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Initialize a binding in the current lexical environment.
    auto value = interpreter.get(m_src);
    return initialize_or_set_binding<EnvironmentMode::Lexical, BindingInitializationMode::Initialize>(interpreter, m_identifier, strict(), value, m_cache);
}
ThrowCompletionOr<void> InitializeVariableBinding::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Initialize a binding in the current variable environment.
    auto value = interpreter.get(m_src);
    return initialize_or_set_binding<EnvironmentMode::Var, BindingInitializationMode::Initialize>(interpreter, m_identifier, strict(), value, m_cache);
}
ThrowCompletionOr<void> SetLexicalBinding::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Assign to an existing binding in the current lexical environment.
    auto value = interpreter.get(m_src);
    return initialize_or_set_binding<EnvironmentMode::Lexical, BindingInitializationMode::Set>(interpreter, m_identifier, strict(), value, m_cache);
}
ThrowCompletionOr<void> SetVariableBinding::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Assign to an existing binding in the current variable environment.
    auto value = interpreter.get(m_src);
    return initialize_or_set_binding<EnvironmentMode::Var, BindingInitializationMode::Set>(interpreter, m_identifier, strict(), value, m_cache);
}
ThrowCompletionOr<void> GetById::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Named property load through the property lookup cache. The lambdas
    // lazily provide the base identifier (for error messages) and the key.
    auto& cache = *bit_cast<PropertyLookupCache*>(m_cache);
    auto base_value = interpreter.get(base());
    auto base_identifier = [&] { return interpreter.get_identifier(m_base_identifier); };
    auto property_key = [&]() -> PropertyKey const& { return interpreter.get_property_key(m_property); };
    interpreter.set(dst(), TRY(get_by_id<GetByIdMode::Normal>(interpreter.vm(), base_identifier, property_key, base_value, base_value, cache)));
    return {};
}
ThrowCompletionOr<void> GetByIdWithThis::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Named property load with an explicit receiver; no base identifier is
    // available for error messages here.
    auto& cache = *bit_cast<PropertyLookupCache*>(m_cache);
    auto base_value = interpreter.get(m_base);
    auto this_value = interpreter.get(m_this_value);
    auto no_base_identifier = [] { return Optional<Utf16FlyString const&> {}; };
    auto property_key = [&]() -> PropertyKey const& { return interpreter.get_property_key(m_property); };
    interpreter.set(dst(), TRY(get_by_id<GetByIdMode::Normal>(interpreter.vm(), no_base_identifier, property_key, base_value, this_value, cache)));
    return {};
}
ThrowCompletionOr<void> GetLength::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Specialized "length" property load through the property lookup cache.
    auto base_value = interpreter.get(base());
    auto& executable = interpreter.current_executable();
    auto& cache = *bit_cast<PropertyLookupCache*>(m_cache);
    // NOTE: The explicit `-> PropertyKey const&` trailing return type matches the
    // sibling GetById/GetLengthWithThis lambdas; without it the deduced return
    // type is PropertyKey by value, copying the key on every execution.
    interpreter.set(dst(), TRY(get_by_id<GetByIdMode::Length>(interpreter.vm(), [&] { return interpreter.get_identifier(m_base_identifier); }, [&]() -> PropertyKey const& { return executable.get_property_key(*executable.length_identifier); }, base_value, base_value, cache)));
    return {};
}
ThrowCompletionOr<void> GetLengthWithThis::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Specialized "length" property load with an explicit receiver.
    auto& executable = interpreter.current_executable();
    auto& cache = *bit_cast<PropertyLookupCache*>(m_cache);
    auto base_value = interpreter.get(m_base);
    auto this_value = interpreter.get(m_this_value);
    auto no_base_identifier = [] { return Optional<Utf16FlyString const&> {}; };
    auto length_key = [&]() -> PropertyKey const& { return executable.get_property_key(*executable.length_identifier); };
    interpreter.set(dst(), TRY(get_by_id<GetByIdMode::Length>(interpreter.vm(), no_base_identifier, length_key, base_value, this_value, cache)));
    return {};
}
ThrowCompletionOr<void> GetPrivateById::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Load a private field (#name) from the base value via a private reference.
    auto& vm = interpreter.vm();
    auto base_value = interpreter.get(m_base);
    auto private_reference = make_private_reference(vm, base_value, interpreter.get_identifier(m_property));
    interpreter.set(dst(), TRY(private_reference.get_value(vm)));
    return {};
}
ThrowCompletionOr<void> HasPrivateId::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // `#field in object`: only valid when the right-hand side is an object.
    auto& vm = interpreter.vm();
    auto base = interpreter.get(m_base);
    if (!base.is_object()) [[unlikely]]
        return vm.throw_completion<TypeError>(ErrorType::InOperatorWithObject);
    auto private_environment = interpreter.running_execution_context().private_environment;
    VERIFY(private_environment);
    auto private_name = private_environment->resolve_private_identifier(interpreter.get_identifier(m_property));
    bool has_private_element = base.as_object().private_element_find(private_name) != nullptr;
    interpreter.set(dst(), Value(has_private_element));
    return {};
}
ThrowCompletionOr<void> PutBySpread::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Spread into an object: copy the source value's data properties onto the base.
    auto& vm = interpreter.vm();
    auto value = interpreter.get(m_src);
    auto base = interpreter.get(m_base);
    // a. Let baseObj be ? ToObject(V.[[Base]]).
    auto base_object = TRY(base.to_object(vm));
    TRY(base_object->copy_data_properties(vm, value, {}));
    return {};
}
ThrowCompletionOr<void> PutById::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Store to a named property (the base is also the receiver), going through
    // the property lookup cache.
    auto& vm = interpreter.vm();
    auto& cache = *bit_cast<PropertyLookupCache*>(m_cache);
    auto value = interpreter.get(m_src);
    auto base = interpreter.get(m_base);
    auto const& base_identifier = interpreter.get_identifier(m_base_identifier);
    auto const& property_key = interpreter.get_property_key(m_property);
    TRY(put_by_property_key(vm, base, base, value, base_identifier, property_key, m_kind, strict(), &cache));
    return {};
}
ThrowCompletionOr<void> PutByIdWithThis::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Store to a named property with an explicit receiver.
    auto& vm = interpreter.vm();
    auto& cache = *bit_cast<PropertyLookupCache*>(m_cache);
    auto value = interpreter.get(m_src);
    auto base = interpreter.get(m_base);
    auto receiver = interpreter.get(m_this_value);
    auto const& name = interpreter.get_property_key(m_property);
    TRY(put_by_property_key(vm, base, receiver, value, {}, name, m_kind, strict(), &cache));
    return {};
}
ThrowCompletionOr<void> PutPrivateById::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Store to a private field (#name); the base must coerce to an object first.
    auto& vm = interpreter.vm();
    auto value = interpreter.get(m_src);
    auto object = TRY(interpreter.get(m_base).to_object(vm));
    auto private_reference = make_private_reference(vm, object, interpreter.get_identifier(m_property));
    TRY(private_reference.put_value(vm, value));
    return {};
}
COLD ThrowCompletionOr<void> DeleteById::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // `delete base.property`: build a property reference, attempt deletion,
    // and store the boolean result.
    auto& vm = interpreter.vm();
    auto const& property_key = interpreter.get_property_key(m_property);
    auto reference = Reference { interpreter.get(m_base), property_key, {}, strict() };
    auto deleted = TRY(reference.delete_(vm));
    interpreter.set(dst(), Value(deleted));
    return {};
}
ThrowCompletionOr<void> ResolveThisBinding::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // OPTIMIZATION: 'this' cannot be reassigned during a function execution, so
    // it is resolved at most once and kept in a dedicated register afterwards.
    auto& cached_this_value = interpreter.reg(Register::this_value());
    if (!cached_this_value.is_special_empty_value())
        return {};
    auto& running_execution_context = interpreter.running_execution_context();
    auto function = running_execution_context.function;
    bool this_comes_from_context = function && is<ECMAScriptFunctionObject>(*function) && !static_cast<ECMAScriptFunctionObject&>(*function).allocates_function_environment();
    if (this_comes_from_context)
        cached_this_value = running_execution_context.this_value.value();
    else
        cached_this_value = TRY(interpreter.vm().resolve_this_binding());
    return {};
}
// https://tc39.es/ecma262/#sec-makesuperpropertyreference
ThrowCompletionOr<void> ResolveSuperBase::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    // 1. Let env be GetThisEnvironment().
    // 2. Assert: env.HasSuperBinding() is true.
    auto& this_environment = as<FunctionEnvironment>(*get_this_environment(vm));
    VERIFY(this_environment.has_super_binding());
    // 3. Let baseValue be ? env.GetSuperBase().
    auto base_value = TRY(this_environment.get_super_base());
    interpreter.set(dst(), base_value);
    return {};
}
void GetNewTarget::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Load the active new.target value from the VM.
    auto new_target = interpreter.vm().get_new_target();
    interpreter.set(dst(), new_target);
}
void GetImportMeta::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Load the import.meta object for the active module from the VM.
    auto import_meta = interpreter.vm().get_import_meta();
    interpreter.set(dst(), import_meta);
}
void GetLexicalEnvironment::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Expose the running context's current lexical environment as a value.
    auto environment = interpreter.running_execution_context().lexical_environment;
    interpreter.set(dst(), environment);
}
void SetLexicalEnvironment::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Replace the running context's lexical environment with the given one.
    auto& environment = as<Environment>(interpreter.get(m_environment).as_cell());
    interpreter.running_execution_context().lexical_environment = &environment;
}
static ThrowCompletionOr<Value> dispatch_builtin_call(Bytecode::Interpreter& interpreter, Bytecode::Builtin builtin, ReadonlySpan<Operand> arguments)
{
    // Dispatch a call to a builtin with a dedicated native *_impl entry point,
    // bypassing the generic call machinery. Builtins without such an entry
    // point are handled elsewhere and must never reach this switch.
    // (Stray timestamp lines pasted into the switch body have been removed;
    // they were not valid C++.)
    switch (builtin) {
    case Builtin::MathAbs:
        return TRY(MathObject::abs_impl(interpreter.vm(), interpreter.get(arguments.data()[0])));
    case Builtin::MathLog:
        return TRY(MathObject::log_impl(interpreter.vm(), interpreter.get(arguments.data()[0])));
    case Builtin::MathPow:
        return TRY(MathObject::pow_impl(interpreter.vm(), interpreter.get(arguments.data()[0]), interpreter.get(arguments.data()[1])));
    case Builtin::MathExp:
        return TRY(MathObject::exp_impl(interpreter.vm(), interpreter.get(arguments.data()[0])));
    case Builtin::MathCeil:
        return TRY(MathObject::ceil_impl(interpreter.vm(), interpreter.get(arguments.data()[0])));
    case Builtin::MathFloor:
        return TRY(MathObject::floor_impl(interpreter.vm(), interpreter.get(arguments.data()[0])));
    case Builtin::MathImul:
        return TRY(MathObject::imul_impl(interpreter.vm(), interpreter.get(arguments.data()[0]), interpreter.get(arguments.data()[1])));
    case Builtin::MathRandom:
        return MathObject::random_impl();
    case Builtin::MathRound:
        return TRY(MathObject::round_impl(interpreter.vm(), interpreter.get(arguments.data()[0])));
    case Builtin::MathSqrt:
        return TRY(MathObject::sqrt_impl(interpreter.vm(), interpreter.get(arguments.data()[0])));
    case Builtin::MathSin:
        return TRY(MathObject::sin_impl(interpreter.vm(), interpreter.get(arguments.data()[0])));
    case Builtin::MathCos:
        return TRY(MathObject::cos_impl(interpreter.vm(), interpreter.get(arguments.data()[0])));
    case Builtin::MathTan:
        return TRY(MathObject::tan_impl(interpreter.vm(), interpreter.get(arguments.data()[0])));
    case Builtin::RegExpPrototypeExec:
    case Builtin::RegExpPrototypeReplace:
    case Builtin::RegExpPrototypeSplit:
    case Builtin::ArrayIteratorPrototypeNext:
    case Builtin::MapIteratorPrototypeNext:
    case Builtin::SetIteratorPrototypeNext:
    case Builtin::StringIteratorPrototypeNext:
        VERIFY_NOT_REACHED();
    case Builtin::OrdinaryHasInstance:
        VERIFY_NOT_REACHED();
    case Bytecode::Builtin::__Count:
        VERIFY_NOT_REACHED();
    }
    VERIFY_NOT_REACHED();
}
template<CallType call_type>
NEVER_INLINE static ThrowCompletionOr<void> execute_call(
    Bytecode::Interpreter& interpreter,
    Value callee,
    Value this_value,
    ReadonlySpan<Operand> arguments,
    Operand dst,
    Optional<StringTableIndex> const expression_string,
    Strict strict)
{
    // Shared implementation for Call / CallConstruct / CallDirectEval:
    // allocates the callee's stack frame, copies the argument operands into it,
    // dispatches the call, and stores the return value into `dst`.
    TRY(throw_if_needed_for_call(interpreter, callee, call_type, expression_string));
    auto& vm = interpreter.vm();
    auto& function = callee.as_function();
    size_t registers_and_locals_count = 0;
    size_t constants_count = 0;
    size_t argument_count = arguments.size();
    function.get_stack_frame_size(registers_and_locals_count, constants_count, argument_count);
    auto& stack = vm.interpreter_stack();
    auto* stack_mark = stack.top();
    // The frame must hold the larger of what the function declares and what
    // this call site actually passes.
    auto* callee_context = stack.allocate(registers_and_locals_count, constants_count, max(arguments.size(), argument_count));
    if (!callee_context) [[unlikely]]
        return vm.throw_completion<InternalError>(ErrorType::CallStackSizeExceeded);
    ScopeGuard deallocate_guard = [&stack, stack_mark] { stack.deallocate(stack_mark); };
    auto* callee_context_argument_values = callee_context->arguments.data();
    auto const callee_context_argument_count = callee_context->arguments.size();
    auto const insn_argument_count = arguments.size();
    // Copy the passed arguments; any remaining declared parameters read as undefined.
    for (size_t i = 0; i < insn_argument_count; ++i)
        callee_context_argument_values[i] = interpreter.get(arguments.data()[i]);
    for (size_t i = insn_argument_count; i < callee_context_argument_count; ++i)
        callee_context_argument_values[i] = js_undefined();
    callee_context->passed_argument_count = insn_argument_count;
    Value retval;
    if (call_type == CallType::DirectEval && callee == interpreter.realm().intrinsics().eval_function()) {
        // A direct call to the intrinsic eval() function is evaluated in place.
        retval = TRY(perform_eval(vm, !callee_context->arguments.is_empty() ? callee_context->arguments[0] : js_undefined(), strict == Strict::Yes ? CallerMode::Strict : CallerMode::NonStrict, EvalMode::Direct));
    } else if (call_type == CallType::Construct) {
        retval = TRY(function.internal_construct(*callee_context, function));
    } else {
        retval = TRY(function.internal_call(*callee_context, this_value));
    }
    interpreter.set(dst, retval);
    return {};
}
ThrowCompletionOr<void> Call::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Regular function call: callee and |this| come from operands.
    auto callee = interpreter.get(m_callee);
    auto this_value = interpreter.get(m_this_value);
    return execute_call<CallType::Call>(interpreter, callee, this_value, { m_arguments, m_argument_count }, m_dst, m_expression_string, strict());
}
NEVER_INLINE ThrowCompletionOr<void> CallConstruct::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // `new` expression: there is no |this| value yet, so pass undefined.
    auto callee = interpreter.get(m_callee);
    return execute_call<CallType::Construct>(interpreter, callee, js_undefined(), { m_arguments, m_argument_count }, m_dst, m_expression_string, strict());
}
ThrowCompletionOr<void> CallDirectEval::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Potential direct eval() call site; execute_call checks whether the
    // callee really is the intrinsic eval function.
    auto callee = interpreter.get(m_callee);
    auto this_value = interpreter.get(m_this_value);
    return execute_call<CallType::DirectEval>(interpreter, callee, this_value, { m_arguments, m_argument_count }, m_dst, m_expression_string, strict());
}
ThrowCompletionOr<void> CallBuiltin::execute_impl(Bytecode::Interpreter& interpreter) const
{
    // Fast path: the callee is still the expected builtin, so dispatch to its
    // native implementation directly. Otherwise fall back to a regular call.
    auto callee = interpreter.get(m_callee);
    bool is_expected_builtin = callee.is_function() && callee.as_function().builtin() == m_builtin;
    if (is_expected_builtin) [[likely]] {
        auto result = TRY(dispatch_builtin_call(interpreter, m_builtin, { m_arguments, m_argument_count }));
        interpreter.set(dst(), result);
        return {};
    }
    return execute_call<CallType::Call>(interpreter, callee, interpreter.get(m_this_value), { m_arguments, m_argument_count }, m_dst, m_expression_string, strict());
}
template<CallType call_type>
NEVER_INLINE static ThrowCompletionOr<void> call_with_argument_array(
    Bytecode::Interpreter& interpreter,
    Value callee,
    Value this_value,
    Value arguments,
    Operand dst,
    Optional<StringTableIndex> const expression_string,
    Strict strict)
{
    // Like execute_call(), but the arguments come from a runtime argument array
    // instead of a fixed list of operands.
    TRY(throw_if_needed_for_call(interpreter, callee, call_type, expression_string));
    auto& vm = interpreter.vm();
    auto& function = callee.as_function();
    auto& argument_array = arguments.as_array();
    auto argument_array_length = argument_array.indexed_properties().array_like_size();
    size_t argument_count = argument_array_length;
    size_t registers_and_locals_count = 0;
    size_t constants_count = 0;
    function.get_stack_frame_size(registers_and_locals_count, constants_count, argument_count);
    auto& stack = vm.interpreter_stack();
    auto* stack_mark = stack.top();
    // The frame must hold the larger of the declared parameter count and the
    // number of array elements actually passed.
    auto* callee_context = stack.allocate(registers_and_locals_count, constants_count, max(argument_array_length, argument_count));
    if (!callee_context) [[unlikely]]
        return vm.throw_completion<InternalError>(ErrorType::CallStackSizeExceeded);
    ScopeGuard deallocate_guard = [&stack, stack_mark] { stack.deallocate(stack_mark); };
    auto* callee_context_argument_values = callee_context->arguments.data();
    auto const callee_context_argument_count = callee_context->arguments.size();
    auto const insn_argument_count = argument_array_length;
    // Holes in the argument array read as undefined, as do any declared
    // parameters beyond the passed count.
    for (size_t i = 0; i < insn_argument_count; ++i) {
        if (auto maybe_value = argument_array.indexed_properties().get(i); maybe_value.has_value())
            callee_context_argument_values[i] = maybe_value.release_value().value;
        else
            callee_context_argument_values[i] = js_undefined();
    }
    for (size_t i = insn_argument_count; i < callee_context_argument_count; ++i)
        callee_context_argument_values[i] = js_undefined();
    callee_context->passed_argument_count = insn_argument_count;
    Value retval;
    if (call_type == CallType::DirectEval && callee == interpreter.realm().intrinsics().eval_function()) {
        // A direct call to the intrinsic eval() function is evaluated in place.
        retval = TRY(perform_eval(vm, !callee_context->arguments.is_empty() ? callee_context->arguments[0] : js_undefined(), strict == Strict::Yes ? CallerMode::Strict : CallerMode::NonStrict, EvalMode::Direct));
    } else if (call_type == CallType::Construct) {
        retval = TRY(function.internal_construct(*callee_context, function));
    } else {
        retval = TRY(function.internal_call(*callee_context, this_value));
    }
    interpreter.set(dst, retval);
    return {};
}
// Spread call: arguments come packed in an Array value.
ThrowCompletionOr<void> CallWithArgumentArray::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto callee_value = interpreter.get(callee());
    auto receiver = interpreter.get(this_value());
    auto argument_array = interpreter.get(arguments());
    return call_with_argument_array<CallType::Call>(interpreter, callee_value, receiver, argument_array, dst(), expression_string(), strict());
}
// Spread form of a direct eval() call site.
ThrowCompletionOr<void> CallDirectEvalWithArgumentArray::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto callee_value = interpreter.get(callee());
    auto receiver = interpreter.get(this_value());
    auto argument_array = interpreter.get(arguments());
    return call_with_argument_array<CallType::DirectEval>(interpreter, callee_value, receiver, argument_array, dst(), expression_string(), strict());
}
// Spread construct: [[Construct]] receives no |this|, so undefined is passed.
ThrowCompletionOr<void> CallConstructWithArgumentArray::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto callee_value = interpreter.get(callee());
    auto argument_array = interpreter.get(arguments());
    return call_with_argument_array<CallType::Construct>(interpreter, callee_value, js_undefined(), argument_array, dst(), expression_string(), strict());
}
// 13.3.7.1 Runtime Semantics: Evaluation, https://tc39.es/ecma262/#sec-super-keyword-runtime-semantics-evaluation
// super(...args): constructs via the super constructor, binds the result as
// |this| in the current function environment, and initializes instance fields.
ThrowCompletionOr<void> SuperCallWithArgumentArray::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    // 1. Let newTarget be GetNewTarget().
    auto new_target = vm.get_new_target();
    // 2. Assert: Type(newTarget) is Object.
    VERIFY(new_target.is_object());
    // 3. Let func be GetSuperConstructor().
    auto* func = get_super_constructor(vm);
    // NON-STANDARD: We're doing this step earlier to streamline control flow.
    // 5. If IsConstructor(func) is false, throw a TypeError exception.
    if (!Value(func).is_constructor()) [[unlikely]]
        return vm.throw_completion<TypeError>(ErrorType::NotAConstructor, "Super constructor");
    auto& function = static_cast<FunctionObject&>(*func);
    // 4. Let argList be ? ArgumentListEvaluation of Arguments.
    auto& argument_array = interpreter.get(m_arguments).as_array();
    size_t argument_array_length = 0;
    // A "synthetic" argument array (e.g. a forwarded arguments object) is
    // measured via LengthOfArrayLike; a real spread array via its indexed
    // property storage.
    if (m_is_synthetic) {
        argument_array_length = MUST(length_of_array_like(vm, argument_array));
    } else {
        argument_array_length = argument_array.indexed_properties().array_like_size();
    }
    size_t argument_count = argument_array_length;
    size_t registers_and_locals_count = 0;
    size_t constants_count = 0;
    // NOTE: argument_count is passed by reference and appears to be adjusted
    //       to the callee's requirements — hence the max() below.
    function.get_stack_frame_size(registers_and_locals_count, constants_count, argument_count);
    auto& stack = vm.interpreter_stack();
    auto* stack_mark = stack.top();
    // Allocate the callee frame; null means the interpreter stack is exhausted.
    auto* callee_context = stack.allocate(registers_and_locals_count, constants_count, max(argument_array_length, argument_count));
    if (!callee_context) [[unlikely]]
        return vm.throw_completion<InternalError>(ErrorType::CallStackSizeExceeded);
    // Unwind the stack to its pre-call mark no matter how the call completes.
    ScopeGuard deallocate_guard = [&stack, stack_mark] { stack.deallocate(stack_mark); };
    auto* callee_context_argument_values = callee_context->arguments.data();
    auto const callee_context_argument_count = callee_context->arguments.size();
    auto const insn_argument_count = argument_array_length;
    // Copy the arguments into the frame; holes become undefined.
    if (m_is_synthetic) {
        for (size_t i = 0; i < insn_argument_count; ++i)
            callee_context_argument_values[i] = argument_array.get_without_side_effects(PropertyKey { i });
    } else {
        for (size_t i = 0; i < insn_argument_count; ++i) {
            if (auto maybe_value = argument_array.indexed_properties().get(i); maybe_value.has_value())
                callee_context_argument_values[i] = maybe_value.release_value().value;
            else
                callee_context_argument_values[i] = js_undefined();
        }
    }
    // Fill any remaining (unpassed) slots with undefined.
    for (size_t i = insn_argument_count; i < callee_context_argument_count; ++i)
        callee_context_argument_values[i] = js_undefined();
    callee_context->passed_argument_count = insn_argument_count;
    // 6. Let result be ? Construct(func, argList, newTarget).
    auto result = TRY(function.internal_construct(*callee_context, new_target.as_function()));
    // 7. Let thisER be GetThisEnvironment().
    auto& this_environment = as<FunctionEnvironment>(*get_this_environment(vm));
    // 8. Perform ? thisER.BindThisValue(result).
    TRY(this_environment.bind_this_value(vm, result));
    // 9. Let F be thisER.[[FunctionObject]].
    auto& f = as<ECMAScriptFunctionObject>(this_environment.function_object());
    // 10. Assert: F is an ECMAScript function object.
    // NOTE: This is implied by the strong C++ type.
    // 11. Perform ? InitializeInstanceElements(result, F).
    TRY(result->initialize_instance_elements(f));
    // 12. Return result.
    interpreter.set(m_dst, result);
    return {};
}
// Instantiate a function object from its shared function data and store it
// in the destination operand.
void NewFunction::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto function = new_function(interpreter, m_shared_function_data_index, m_home_object);
    interpreter.set(dst(), function);
}
// Hand the return value operand to the interpreter's return machinery.
void Return::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto return_value = interpreter.get(m_value);
    interpreter.do_return(return_value);
}
// In-place prefix increment of the dst operand (ToNumeric, then +1 as Number
// or BigInt).
ThrowCompletionOr<void> Increment::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto value = interpreter.get(dst());
    // OPTIMIZATION: Int32 values that cannot overflow are bumped directly.
    if (value.is_int32()) [[likely]] {
        if (auto int_value = value.as_i32(); int_value != NumericLimits<i32>::max()) [[likely]] {
            interpreter.set(dst(), Value { int_value + 1 });
            return {};
        }
    }
    // Slow path: coerce with ToNumeric, then add one in the numeric domain.
    value = TRY(value.to_numeric(vm));
    if (value.is_number()) {
        interpreter.set(dst(), Value(value.as_double() + 1));
    } else {
        auto incremented = value.as_bigint().big_integer().plus(Crypto::SignedBigInteger { 1 });
        interpreter.set(dst(), BigInt::create(vm, incremented));
    }
    return {};
}
// Postfix increment: dst receives the old (numeric) value, src is bumped.
ThrowCompletionOr<void> PostfixIncrement::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto value = interpreter.get(m_src);
    // OPTIMIZATION: Int32 values that cannot overflow are bumped directly.
    if (value.is_int32()) [[likely]] {
        if (auto int_value = value.as_i32(); int_value != NumericLimits<i32>::max()) [[likely]] {
            interpreter.set(m_dst, value);
            interpreter.set(m_src, Value { int_value + 1 });
            return {};
        }
    }
    // Slow path: coerce with ToNumeric, then add one in the numeric domain.
    value = TRY(value.to_numeric(vm));
    interpreter.set(m_dst, value);
    if (value.is_number()) {
        interpreter.set(m_src, Value(value.as_double() + 1));
    } else {
        auto incremented = value.as_bigint().big_integer().plus(Crypto::SignedBigInteger { 1 });
        interpreter.set(m_src, BigInt::create(vm, incremented));
    }
    return {};
}
// In-place prefix decrement of the dst operand (ToNumeric, then -1 as Number
// or BigInt).
// NOTE: Removed stray commit-message text that had been pasted into the body
//       and restored the Int32 fast path to mirror Increment::execute_impl.
ThrowCompletionOr<void> Decrement::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto old_value = interpreter.get(dst());
    // OPTIMIZATION: Fast path for Int32 values, guarding against underflow.
    if (old_value.is_int32()) [[likely]] {
        auto integer_value = old_value.as_i32();
        if (integer_value != NumericLimits<i32>::min()) [[likely]] {
            interpreter.set(dst(), Value { integer_value - 1 });
            return {};
        }
    }
    old_value = TRY(old_value.to_numeric(vm));
    if (old_value.is_number())
        interpreter.set(dst(), Value(old_value.as_double() - 1));
    else
        interpreter.set(dst(), BigInt::create(vm, old_value.as_bigint().big_integer().minus(Crypto::SignedBigInteger { 1 })));
    return {};
}
// Postfix decrement: dst receives the old (numeric) value, src is decremented.
// NOTE: Added the Int32 fast path for consistency with PostfixIncrement;
//       the fast path is behavior-preserving since ToNumeric of an Int32 is
//       the value itself.
ThrowCompletionOr<void> PostfixDecrement::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto old_value = interpreter.get(m_src);
    // OPTIMIZATION: Fast path for Int32 values, guarding against underflow.
    if (old_value.is_int32()) [[likely]] {
        auto integer_value = old_value.as_i32();
        if (integer_value != NumericLimits<i32>::min()) [[likely]] {
            interpreter.set(m_dst, old_value);
            interpreter.set(m_src, Value { integer_value - 1 });
            return {};
        }
    }
    old_value = TRY(old_value.to_numeric(vm));
    interpreter.set(m_dst, old_value);
    if (old_value.is_number())
        interpreter.set(m_src, Value(old_value.as_double() - 1));
    else
        interpreter.set(m_src, BigInt::create(vm, old_value.as_bigint().big_integer().minus(Crypto::SignedBigInteger { 1 })));
    return {};
}
// Turn the source operand into a throw completion.
COLD ThrowCompletionOr<void> Throw::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto thrown_value = interpreter.get(src());
    return throw_completion(thrown_value);
}
// Throws a TypeError unless the source operand holds an object.
ThrowCompletionOr<void> ThrowIfNotObject::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto value = interpreter.get(m_src);
    if (value.is_object()) [[likely]]
        return {};
    return interpreter.vm().throw_completion<TypeError>(ErrorType::NotAnObject, value);
}
// Throws a TypeError if the source operand is null or undefined.
ThrowCompletionOr<void> ThrowIfNullish::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto value = interpreter.get(m_src);
    if (!value.is_nullish()) [[likely]]
        return {};
    return interpreter.vm().throw_completion<TypeError>(ErrorType::NotObjectCoercible, value);
}
// Temporal-dead-zone check: an empty value in the source operand means the
// binding has not been initialized yet, which is a ReferenceError.
// NOTE: Removed stray commit-message text that had been pasted into the body.
ThrowCompletionOr<void> ThrowIfTDZ::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto value = interpreter.get(m_src);
    if (value.is_special_empty_value()) [[unlikely]]
        return vm.throw_completion<ReferenceError>(ErrorType::BindingNotInitialized, value);
    return {};
}
// Assigning to a constant binding is unconditionally a TypeError.
ThrowCompletionOr<void> ThrowConstAssignment::execute_impl(Bytecode::Interpreter& interpreter) const
{
    return interpreter.vm().throw_completion<TypeError>(ErrorType::InvalidAssignToConst);
}
// Pop one level off the running execution context's private environment chain.
void LeavePrivateEnvironment::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& context = interpreter.vm().running_execution_context();
    context.private_environment = context.private_environment->outer_environment();
}
// Suspend the generator, yielding the operand value (an empty value yields
// undefined) with the given continuation label.
void Yield::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto value = interpreter.get(m_value);
    if (value.is_special_empty_value())
        value = js_undefined();
    interpreter.do_return(interpreter.do_yield(value, m_continuation_label));
}
LibJS/Bytecode: Move to a new bytecode format This patch moves us away from the accumulator-based bytecode format to one with explicit source and destination registers. The new format has multiple benefits: - ~25% faster on the Kraken and Octane benchmarks :^) - Fewer instructions to accomplish the same thing - Much easier for humans to read(!) Because this change requires a fundamental shift in how bytecode is generated, it is quite comprehensive. Main implementation mechanism: generate_bytecode() virtual function now takes an optional "preferred dst" operand, which allows callers to communicate when they have an operand that would be optimal for the result to go into. It also returns an optional "actual dst" operand, which is where the completion value (if any) of the AST node is stored after the node has "executed". One thing of note that's new: because instructions can now take locals as operands, this means we got rid of the GetLocal instruction. A side-effect of that is we have to think about the temporal deadzone (TDZ) a bit differently for locals (GetLocal would previously check for empty values and interpret that as a TDZ access and throw). We now insert special ThrowIfTDZ instructions in places where a local access may be in the TDZ, to maintain the correct behavior. There are a number of progressions and regressions from this test: A number of async generator tests have been accidentally fixed while converting the implementation to the new bytecode format. It didn't seem useful to preserve bugs in the original code when converting it. Some "does eval() return the correct completion value" tests have regressed, in particular ones related to propagating the appropriate completion after control flow statements like continue and break. These are all fairly obscure issues, and I believe we can continue working on them separately. The net test262 result is a progression though. :^)
2024-02-04 08:00:54 +01:00
void Await::execute_impl(Bytecode::Interpreter& interpreter) const
{
auto yielded_value = interpreter.get(m_argument).is_special_empty_value() ? js_undefined() : interpreter.get(m_argument);
// FIXME: If we get a pointer, which is not accurately representable as a double
// will cause this to explode
auto continuation_value = Value(m_continuation_label.address());
auto result = interpreter.vm().heap().allocate<GeneratorResult>(yielded_value, continuation_value, true);
interpreter.do_return(result);
}
// dst = base[property], via the shared get_by_value() helper.
ThrowCompletionOr<void> GetByValue::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto base_value = interpreter.get(m_base);
    auto property_value = interpreter.get(m_property);
    auto result = TRY(get_by_value(interpreter.vm(), m_base_identifier, base_value, property_value, interpreter.current_executable()));
    interpreter.set(dst(), result);
    return {};
}
// dst = base[property] with an explicit receiver (|this|) for the [[Get]].
ThrowCompletionOr<void> GetByValueWithThis::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto property_value = interpreter.get(m_property);
    // ToObject first, then ToPropertyKey — both may throw.
    auto base_object = TRY(interpreter.get(m_base).to_object(vm));
    auto key = TRY(property_value.to_property_key(vm));
    interpreter.set(dst(), TRY(base_object->internal_get(key, interpreter.get(m_this_value))));
    return {};
}
// base[property] = src, via the shared put_by_value() helper.
ThrowCompletionOr<void> PutByValue::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto stored_value = interpreter.get(m_src);
    auto base_value = interpreter.get(m_base);
    auto const& base_identifier = interpreter.get_identifier(m_base_identifier);
    auto property_value = interpreter.get(m_property);
    TRY(put_by_value(vm, base_value, base_identifier, property_value, stored_value, m_kind, strict()));
    return {};
}
// base[property] = src with an explicit receiver (|this|) for the [[Set]].
ThrowCompletionOr<void> PutByValueWithThis::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto stored_value = interpreter.get(m_src);
    auto base_value = interpreter.get(m_base);
    auto receiver = interpreter.get(m_this_value);
    // Key conversion may throw and may run user code.
    auto key = TRY(interpreter.get(m_property).to_property_key(vm));
    TRY(put_by_property_key(vm, base_value, receiver, stored_value, {}, key, m_kind, strict()));
    return {};
}
// dst = delete base[property]; the boolean result of the deletion is stored.
COLD ThrowCompletionOr<void> DeleteByValue::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto key = TRY(interpreter.get(m_property).to_property_key(vm));
    Reference reference { interpreter.get(m_base), key, {}, strict() };
    auto deleted = TRY(reference.delete_(vm));
    interpreter.set(m_dst, Value(deleted));
    return {};
}
// GetIterator(iterable, hint), unpacking the iterator record into its three
// destination operands (object, next method, done flag).
ThrowCompletionOr<void> GetIterator::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto record = TRY(get_iterator_impl(vm, interpreter.get(iterable()), m_hint));
    interpreter.set(m_dst_iterator_object, record.iterator);
    interpreter.set(m_dst_iterator_next, record.next_method);
    interpreter.set(m_dst_iterator_done, Value(record.done));
    return {};
}
// dst = GetMethod(object, property); a missing method becomes undefined.
ThrowCompletionOr<void> GetMethod::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto const& key = interpreter.get_property_key(m_property);
    auto method = TRY(interpreter.get(m_object).get_method(vm, key));
    if (method)
        interpreter.set(dst(), method);
    else
        interpreter.set(dst(), js_undefined());
    return {};
}
// Build a for-in style property iterator over the object operand and unpack
// its record into the three destination operands.
NEVER_INLINE ThrowCompletionOr<void> GetObjectPropertyIterator::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto record = TRY(get_object_property_iterator(interpreter, interpreter.get(m_object)));
    interpreter.set(m_dst_iterator_object, record.iterator);
    interpreter.set(m_dst_iterator_next, record.next_method);
    interpreter.set(m_dst_iterator_done, Value(record.done));
    return {};
}
// Reassemble the iterator record from its operand slots and perform
// IteratorClose with the given completion.
ThrowCompletionOr<void> IteratorClose::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto& iterator = interpreter.get(m_iterator_object).as_object();
    auto next_method = interpreter.get(m_iterator_next);
    auto done_flag = interpreter.get(m_iterator_done).as_bool();
    IteratorRecordImpl record { .done = done_flag, .iterator = iterator, .next_method = next_method };
    // FIXME: Return the value of the resulting completion.
    TRY(iterator_close(vm, record, Completion { m_completion_type, interpreter.get(m_completion_value) }));
    return {};
}
// Invoke the iterator's next() via the reassembled record and store the
// result object in dst; the done operand is written back when set.
ThrowCompletionOr<void> IteratorNext::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto& iterator = interpreter.get(m_iterator_object).as_object();
    auto next_method = interpreter.get(m_iterator_next);
    auto done_flag = interpreter.get(m_iterator_done).as_bool();
    IteratorRecordImpl record { .done = done_flag, .iterator = iterator, .next_method = next_method };
    interpreter.set(m_dst, TRY(JS::iterator_next(vm, record)));
    if (done_flag)
        interpreter.set(m_iterator_done, Value(true));
    return {};
}
// Step the iterator and unpack the result: dst_done signals exhaustion, and
// dst_value receives the yielded value when iteration continues.
ThrowCompletionOr<void> IteratorNextUnpack::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto& iterator = interpreter.get(m_iterator_object).as_object();
    auto next_method = interpreter.get(m_iterator_next);
    auto done_flag = interpreter.get(m_iterator_done).as_bool();
    IteratorRecordImpl record { .done = done_flag, .iterator = iterator, .next_method = next_method };
    auto step_result = TRY(iterator_step(vm, record));
    if (done_flag)
        interpreter.set(m_iterator_done, Value(true));
    // Exhausted: only the done flag is written; dst_value stays untouched.
    if (step_result.has<IterationDone>()) {
        interpreter.set(m_dst_done, Value(true));
        return {};
    }
    auto& iteration_result = step_result.get<IterationResult>();
    interpreter.set(m_dst_done, TRY(iteration_result.done));
    interpreter.set(m_dst_value, TRY(iteration_result.value));
    return {};
}
// Construct a class from its blueprint: gather the (optional) super class and
// computed element keys from operands, then build the constructor/prototype
// pair in the captured class environment.
// NOTE: Removed stray commit-message text that had been pasted into the body.
NEVER_INLINE ThrowCompletionOr<void> NewClass::execute_impl(Bytecode::Interpreter& interpreter) const
{
    Value super_class;
    if (m_super_class.has_value())
        super_class = interpreter.get(m_super_class.value());
    // Collect pre-evaluated computed keys for the class elements; elements
    // without a computed key contribute an empty value.
    Vector<Value> element_keys;
    element_keys.ensure_capacity(m_element_keys_count);
    for (size_t i = 0; i < m_element_keys_count; ++i) {
        Value element_key;
        if (m_element_keys[i].has_value())
            element_key = interpreter.get(m_element_keys[i].value());
        element_keys.unchecked_append(element_key);
    }
    auto& running_execution_context = interpreter.running_execution_context();
    auto* class_environment = &as<Environment>(interpreter.get(m_class_environment).as_cell());
    auto& outer_environment = running_execution_context.lexical_environment;
    auto const& blueprint = interpreter.current_executable().class_blueprints[m_class_blueprint_index];
    // An anonymous class expression may pick up its name from the assignment
    // target (m_lhs_name); a named class binds its own name in scope.
    Optional<Utf16FlyString> binding_name;
    Utf16FlyString class_name;
    if (!blueprint.has_name && m_lhs_name.has_value()) {
        class_name = interpreter.get_identifier(m_lhs_name.value());
    } else {
        class_name = blueprint.name;
        binding_name = class_name;
    }
    auto* retval = TRY(construct_class(interpreter.vm(), blueprint, interpreter.current_executable(), class_environment, outer_environment, super_class, element_keys, binding_name, class_name));
    interpreter.set(dst(), retval);
    return {};
}
// 13.5.3.1 Runtime Semantics: Evaluation, https://tc39.es/ecma262/#sec-typeof-operator-runtime-semantics-evaluation
ThrowCompletionOr<void> TypeofBinding::execute_impl(Bytecode::Interpreter& interpreter) const
{
auto& vm = interpreter.vm();
if (m_cache.is_valid()) [[likely]] {
auto const* environment = interpreter.running_execution_context().lexical_environment.ptr();
LibJS: Limit eval() deoptimization to the containing function scope Previously, when direct eval() was called, we would mark the entire environment chain as "permanently screwed by eval", disabling variable access caching all the way up to the global scope. This was overly conservative. According to the ECMAScript specification, a sloppy direct eval() can only inject var declarations into its containing function's variable environment - it cannot inject variables into parent function scopes. This patch makes two changes: 1. Stop propagating the "screwed by eval" flag at function boundaries. When set_permanently_screwed_by_eval() hits a FunctionEnvironment or GlobalEnvironment, it no longer continues to outer environments. 2. Check each environment during cache lookup traversal. If any environment in the path is marked as screwed, we bail to the slow path. This catches the case where we're inside a function with eval and have a cached coordinate pointing to an outer scope. The second change is necessary because eval can create local variables that shadow outer bindings. When looking up a variable from inside a function that called eval, we can't trust cached coordinates that point to outer scopes, since eval may have created a closer binding. This improves performance for code with nested functions where an inner function uses eval but parent functions perform many variable accesses. The parent functions can now use cached environment coordinates. All 29 new tests verify behavior matches V8.
2026-01-26 20:40:05 +01:00
for (size_t i = 0; i < m_cache.hops; ++i) {
if (environment->is_permanently_screwed_by_eval()) [[unlikely]]
goto slow_path;
environment = environment->outer_environment();
LibJS: Limit eval() deoptimization to the containing function scope Previously, when direct eval() was called, we would mark the entire environment chain as "permanently screwed by eval", disabling variable access caching all the way up to the global scope. This was overly conservative. According to the ECMAScript specification, a sloppy direct eval() can only inject var declarations into its containing function's variable environment - it cannot inject variables into parent function scopes. This patch makes two changes: 1. Stop propagating the "screwed by eval" flag at function boundaries. When set_permanently_screwed_by_eval() hits a FunctionEnvironment or GlobalEnvironment, it no longer continues to outer environments. 2. Check each environment during cache lookup traversal. If any environment in the path is marked as screwed, we bail to the slow path. This catches the case where we're inside a function with eval and have a cached coordinate pointing to an outer scope. The second change is necessary because eval can create local variables that shadow outer bindings. When looking up a variable from inside a function that called eval, we can't trust cached coordinates that point to outer scopes, since eval may have created a closer binding. This improves performance for code with nested functions where an inner function uses eval but parent functions perform many variable accesses. The parent functions can now use cached environment coordinates. All 29 new tests verify behavior matches V8.
2026-01-26 20:40:05 +01:00
}
if (!environment->is_permanently_screwed_by_eval()) [[likely]] {
auto value = TRY(static_cast<DeclarativeEnvironment const&>(*environment).get_binding_value_direct(vm, m_cache.index));
interpreter.set(dst(), value.typeof_(vm));
return {};
}
LibJS: Limit eval() deoptimization to the containing function scope Previously, when direct eval() was called, we would mark the entire environment chain as "permanently screwed by eval", disabling variable access caching all the way up to the global scope. This was overly conservative. According to the ECMAScript specification, a sloppy direct eval() can only inject var declarations into its containing function's variable environment - it cannot inject variables into parent function scopes. This patch makes two changes: 1. Stop propagating the "screwed by eval" flag at function boundaries. When set_permanently_screwed_by_eval() hits a FunctionEnvironment or GlobalEnvironment, it no longer continues to outer environments. 2. Check each environment during cache lookup traversal. If any environment in the path is marked as screwed, we bail to the slow path. This catches the case where we're inside a function with eval and have a cached coordinate pointing to an outer scope. The second change is necessary because eval can create local variables that shadow outer bindings. When looking up a variable from inside a function that called eval, we can't trust cached coordinates that point to outer scopes, since eval may have created a closer binding. This improves performance for code with nested functions where an inner function uses eval but parent functions perform many variable accesses. The parent functions can now use cached environment coordinates. All 29 new tests verify behavior matches V8.
2026-01-26 20:40:05 +01:00
slow_path:
m_cache = {};
}
// 1. Let val be the result of evaluating UnaryExpression.
auto reference = TRY(vm.resolve_binding(interpreter.get_identifier(m_identifier), strict()));
// 2. If val is a Reference Record, then
// a. If IsUnresolvableReference(val) is true, return "undefined".
if (reference.is_unresolvable()) {
interpreter.set(dst(), PrimitiveString::create(vm, "undefined"_string));
return {};
}
// 3. Set val to ? GetValue(val).
auto value = TRY(reference.get_value(vm));
if (reference.environment_coordinate().has_value())
m_cache = reference.environment_coordinate().value();
// 4. NOTE: This step is replaced in section B.3.6.3.
// 5. Return a String according to Table 41.
interpreter.set(dst(), value.typeof_(vm));
return {};
}
// Unpack a CompletionCell: store its [[Value]] and [[Type]] into separate registers.
void GetCompletionFields::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto const& cell = static_cast<CompletionCell const&>(interpreter.get(m_completion).as_cell());
    auto const& completion = cell.completion();
    interpreter.set(m_value_dst, completion.value());
    // The completion type is exposed as its underlying integer value.
    interpreter.set(m_type_dst, Value(to_underlying(completion.type())));
}
// Rewrite a CompletionCell's [[Type]] while leaving its [[Value]] untouched.
void SetCompletionType::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& cell = static_cast<CompletionCell&>(interpreter.get(m_completion).as_cell());
    auto previous_value = cell.completion().value();
    cell.set_completion(Completion { m_completion_type, previous_value });
}
// Create an immutable (const-like) binding for the identifier in the given environment.
ThrowCompletionOr<void> CreateImmutableBinding::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto& target_environment = as<Environment>(interpreter.get(m_environment).as_cell());
    auto const& name = interpreter.get_identifier(m_identifier);
    return target_environment.create_immutable_binding(vm, name, m_strict_binding);
}
// Create a mutable binding for the identifier in the given environment.
ThrowCompletionOr<void> CreateMutableBinding::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto& target_environment = as<Environment>(interpreter.get(m_environment).as_cell());
    auto const& name = interpreter.get_identifier(m_identifier);
    return target_environment.create_mutable_binding(vm, name, m_can_be_deleted);
}
// ToObject: coerce the source register to an Object (may throw, e.g. on null/undefined).
ThrowCompletionOr<void> ToObject::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto object = TRY(interpreter.get(m_value).to_object(vm));
    interpreter.set(m_dst, object);
    return {};
}
// ToBoolean: never throws, so this opcode is infallible.
void ToBoolean::execute_impl(Bytecode::Interpreter& interpreter) const
{
    bool truthy = interpreter.get(m_value).to_boolean();
    interpreter.set(m_dst, Value(truthy));
}
// ToLength: coerce the source register to a valid array-length integer (may throw during ToNumber).
ThrowCompletionOr<void> ToLength::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto length = TRY(interpreter.get(m_value).to_length(vm));
    interpreter.set(m_dst, Value { length });
    return {};
}
// Wrap a sync iterator record in an async-from-sync iterator, then materialize the
// resulting iterator record as a plain (null-prototype) object for later unpacking.
void CreateAsyncFromSyncIterator::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto& realm = interpreter.realm();

    // Reassemble the sync IteratorRecord from its three registers.
    auto& sync_iterator = interpreter.get(m_iterator).as_object();
    auto sync_next_method = interpreter.get(m_next_method);
    bool sync_done = interpreter.get(m_done).as_bool();
    auto sync_record = realm.create<IteratorRecord>(sync_iterator, sync_next_method, sync_done);

    auto async_record = create_async_from_sync_iterator(vm, sync_record);

    // Pack the async record's fields onto a fresh object.
    auto record_object = Object::create(realm, nullptr);
    record_object->define_direct_property(vm.names.iterator, async_record.iterator, default_attributes);
    record_object->define_direct_property(vm.names.nextMethod, async_record.next_method, default_attributes);
    record_object->define_direct_property(vm.names.done, Value { async_record.done }, default_attributes);
    interpreter.set(m_dst, record_object);
}
// CreateDataPropertyOrThrow: define a data property on the object, throwing on failure.
ThrowCompletionOr<void> CreateDataPropertyOrThrow::execute_impl(Bytecode::Interpreter& interpreter) const
{
    auto& vm = interpreter.vm();
    auto& target = interpreter.get(m_object).as_object();
    // NOTE: ToPropertyKey may run user code (e.g. Symbol.toPrimitive) and throw.
    auto property_key = TRY(interpreter.get(m_property).to_property_key(vm));
    TRY(target.create_data_property_or_throw(property_key, interpreter.get(m_value)));
    return {};
}
// IsCallable: true when the value has a [[Call]] internal method.
void IsCallable::execute_impl(Bytecode::Interpreter& interpreter) const
{
    bool callable = interpreter.get(value()).is_function();
    interpreter.set(dst(), Value(callable));
}
// IsConstructor: true when the value has a [[Construct]] internal method.
void IsConstructor::execute_impl(Bytecode::Interpreter& interpreter) const
{
    bool constructible = interpreter.get(value()).is_constructor();
    interpreter.set(dst(), Value(constructible));
}
}