chore: bump chromium to 130.0.6669.0 (33-x-y) (#43403)

* chore: bump chromium in DEPS to 130.0.6669.0

Co-authored-by: electron-roller[bot] <84116207+electron-roller[bot]@users.noreply.github.com>

* 5789734: Consolidate all the accessibility scale factor utility code into one file

5789734

Co-authored-by: Shelley Vohr <shelley.vohr@gmail.com>

* 5798543: [autofill] Don't emit autofill audit reports if inspector not connected

5798543

Co-authored-by: Shelley Vohr <shelley.vohr@gmail.com>

* 5797073: [wasm] Spill all loop inputs before entering loop

5797073

Co-authored-by: Shelley Vohr <shelley.vohr@gmail.com>

* chore: fixup patch indices

Co-authored-by: Shelley Vohr <shelley.vohr@gmail.com>

* 5795224: Version 13.0.0

5795224

Co-authored-by: Shelley Vohr <shelley.vohr@gmail.com>

---------

Co-authored-by: trop[bot] <37223003+trop[bot]@users.noreply.github.com>
Co-authored-by: electron-roller[bot] <84116207+electron-roller[bot]@users.noreply.github.com>
Co-authored-by: Shelley Vohr <shelley.vohr@gmail.com>
This commit is contained in:
trop[bot] 2024-08-21 20:20:13 +02:00 committed by GitHub
parent 6263a39d9a
commit dbd7840254
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
28 changed files with 93 additions and 198 deletions

View file

@@ -1,4 +1,3 @@
chore_allow_customizing_microtask_policy_per_context.patch
deps_add_v8_object_setinternalfieldfornodecore.patch
fix_disable_scope_reuse_associated_dchecks.patch
spill_all_loop_inputs_before_entering_loop.patch

View file

@@ -46,10 +46,10 @@ index 71a6c2c9c149116caa410d25aef4087774b81b44..ad8416ea2500f10aad31f25da96b235f
V8_INLINE static void* GetAlignedPointerFromInternalField(
const BasicTracedReference<Object>& object, int index) {
diff --git a/src/api/api.cc b/src/api/api.cc
index 5ab671c8c4168ac7ccd9d18ea4b9fda16734e4ad..46d56957a5845745fa07ae3db79dd753f0004a5d 100644
index c9b326ef0b2ccaeb8c62a210385fd38be8e960e1..1cbfa5aac7ddf8f64670e8f66fd51c0dd3b2c851 100644
--- a/src/api/api.cc
+++ b/src/api/api.cc
@@ -6384,14 +6384,33 @@ Local<Data> v8::Object::SlowGetInternalField(int index) {
@@ -6379,14 +6379,33 @@ Local<Data> v8::Object::SlowGetInternalField(int index) {
i::Cast<i::JSObject>(*obj)->GetEmbedderField(index), isolate));
}

View file

@@ -42,10 +42,10 @@ index 57a9dca1a84dee95d36c2b296fc170399db3e213..5db78a650068faa0bacf05b13d86860c
#endif
if (!scope->is_function_scope() ||
diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h
index 2c9c12e6cf4a9ebfdc5cb08ef7a53b0ca77222bb..98ce94ec8d3af5afdc3a1308e65f7122e46983f2 100644
index fe346c46296c9b665e0a2bdd5127e929f6c91b3c..80790f627c069572f87a803bb16a312ae75e3dbe 100644
--- a/src/flags/flag-definitions.h
+++ b/src/flags/flag-definitions.h
@@ -984,7 +984,12 @@ DEFINE_BOOL(trace_track_allocation_sites, false,
@@ -964,7 +964,12 @@ DEFINE_BOOL(trace_track_allocation_sites, false,
DEFINE_BOOL(trace_migration, false, "trace object migration")
DEFINE_BOOL(trace_generalization, false, "trace map generalization")

View file

@@ -1,104 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Clemens Backes <clemensb@chromium.org>
Date: Tue, 20 Aug 2024 12:25:40 +0200
Subject: Spill all loop inputs before entering loop
This avoids having to load the value back into a register if it was
spilled inside of the loop.
R=jkummerow@chromium.org
Fixed: chromium:360700873
Change-Id: I24f5deacebc893293e8a3c007e9f070c7fa0ccd2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/5797073
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#95711}
diff --git a/src/wasm/baseline/liftoff-assembler.cc b/src/wasm/baseline/liftoff-assembler.cc
index ffc8dbb4f99ff8d340efd705f2059e39e046a47f..e1ca02f7adc84ae9e82e9f9b668abde9eb37b94a 100644
--- a/src/wasm/baseline/liftoff-assembler.cc
+++ b/src/wasm/baseline/liftoff-assembler.cc
@@ -424,29 +424,10 @@ void LiftoffAssembler::DropExceptionValueAtOffset(int offset) {
cache_state_.stack_state.pop_back();
}
-void LiftoffAssembler::PrepareLoopArgs(int num) {
- for (int i = 0; i < num; ++i) {
- VarState& slot = cache_state_.stack_state.end()[-1 - i];
- if (slot.is_stack()) continue;
- RegClass rc = reg_class_for(slot.kind());
- if (slot.is_reg()) {
- if (cache_state_.get_use_count(slot.reg()) > 1) {
- // If the register is used more than once, we cannot use it for the
- // merge. Move it to an unused register instead.
- LiftoffRegList pinned;
- pinned.set(slot.reg());
- LiftoffRegister dst_reg = GetUnusedRegister(rc, pinned);
- Move(dst_reg, slot.reg(), slot.kind());
- cache_state_.dec_used(slot.reg());
- cache_state_.inc_used(dst_reg);
- slot.MakeRegister(dst_reg);
- }
- continue;
- }
- LiftoffRegister reg = GetUnusedRegister(rc, {});
- LoadConstant(reg, slot.constant());
- slot.MakeRegister(reg);
- cache_state_.inc_used(reg);
+void LiftoffAssembler::SpillLoopArgs(int num) {
+ for (VarState& slot :
+ base::VectorOf(cache_state_.stack_state.end() - num, num)) {
+ Spill(&slot);
}
}
@@ -664,14 +645,14 @@ void LiftoffAssembler::Spill(VarState* slot) {
}
void LiftoffAssembler::SpillLocals() {
- for (uint32_t i = 0; i < num_locals_; ++i) {
- Spill(&cache_state_.stack_state[i]);
+ for (VarState& local_slot :
+ base::VectorOf(cache_state_.stack_state.data(), num_locals_)) {
+ Spill(&local_slot);
}
}
void LiftoffAssembler::SpillAllRegisters() {
- for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
- auto& slot = cache_state_.stack_state[i];
+ for (VarState& slot : cache_state_.stack_state) {
if (!slot.is_reg()) continue;
Spill(slot.offset(), slot.reg(), slot.kind());
slot.MakeStack();
diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h
index 2fb62ff39c65ad2a621b51628716265b11cb4bd0..274c78c2ed4b9d8968df19915d33caf96c5017e0 100644
--- a/src/wasm/baseline/liftoff-assembler.h
+++ b/src/wasm/baseline/liftoff-assembler.h
@@ -477,9 +477,9 @@ class LiftoffAssembler : public MacroAssembler {
// the bottom of the stack.
void DropExceptionValueAtOffset(int offset);
- // Ensure that the loop inputs are either in a register or spilled to the
- // stack, so that we can merge different values on the back-edge.
- void PrepareLoopArgs(int num);
+ // Spill all loop inputs to the stack to free registers and to ensure that we
+ // can merge different values on the back-edge.
+ void SpillLoopArgs(int num);
V8_INLINE static int NextSpillOffset(ValueKind kind, int top_spill_offset);
V8_INLINE int NextSpillOffset(ValueKind kind);
diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc
index e4a894d2b364c4546d92819ab1ce8fb11eabfaff..71c3ad9aa1742b93bb4bf5fc707077dff7f0e92e 100644
--- a/src/wasm/baseline/liftoff-compiler.cc
+++ b/src/wasm/baseline/liftoff-compiler.cc
@@ -1395,7 +1395,7 @@ class LiftoffCompiler {
// pre-analysis of the function.
__ SpillLocals();
- __ PrepareLoopArgs(loop->start_merge.arity);
+ __ SpillLoopArgs(loop->start_merge.arity);
// Loop labels bind at the beginning of the block.
__ bind(loop->label.get());