electron/patches/node/support_v8_sandboxed_pointers.patch
electron-roller[bot] 66fa1f4665
chore: bump node to v20.17.0 (33-x-y) (#43426)
* chore: bump node in DEPS to v20.17.0

* module: disallow CJS <-> ESM edges in a cycle from require(esm)

https://github.com/nodejs/node/pull/52264

* src: expose LookupAndCompile with parameters

https://github.com/nodejs/node/pull/53886

* src: fix -Wshadow warning

https://github.com/nodejs/node/pull/53885

* lib: convert WeakMaps in cjs loader with symbol properties

https://github.com/nodejs/node/pull/52095

* src: reduce unnecessary serialization of CLI options in C++

https://github.com/nodejs/node/pull/52451

* build: ensure v8_pointer_compression_sandbox is enabled on 64bit

https://github.com/nodejs/node/pull/53884

* lib: improve error message when index not found on cjs

https://github.com/nodejs/node/pull/53859

* src,lib: expose getCategoryEnabledBuffer to use on node.http

https://github.com/nodejs/node/pull/53602

* deps: update c-ares to v1.32.2

https://github.com/nodejs/node/pull/53865

* chore: update patch indices

* deps: update V8 to 12.2

https://github.com/nodejs/node/pull/51362

* stream: expose DuplexPair API

https://github.com/nodejs/node/pull/34111

---------

Co-authored-by: electron-roller[bot] <84116207+electron-roller[bot]@users.noreply.github.com>
Co-authored-by: Shelley Vohr <shelley.vohr@gmail.com>
2024-08-27 15:03:21 -04:00

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Jeremy Rose <japthorp@slack-corp.com>
Date: Tue, 21 Jun 2022 10:04:21 -0700
Subject: support V8 sandboxed pointers

This refactors several allocators to allocate within the V8 memory cage,
allowing them to be compatible with the V8_SANDBOXED_POINTERS feature.
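
A minimal sketch of the recurring pattern these call sites switch to (this
is illustrative only, not code from the patch, and the helper name
CopyIntoCage is hypothetical): bytes allocated outside the sandbox are
copied into memory obtained from V8's default ArrayBuffer allocator, which
allocates inside the cage when the sandbox is enabled, before being wrapped
in a BackingStore.

    #include <cstring>
    #include <memory>
    #include "v8.h"

    // Hypothetical helper illustrating the pattern used throughout this
    // patch: copy externally-allocated bytes into cage-allocated memory
    // and hand them to V8, freeing the caged copy when the BackingStore
    // is destroyed.
    std::unique_ptr<v8::BackingStore> CopyIntoCage(const void* data,
                                                   size_t size) {
      std::unique_ptr<v8::ArrayBuffer::Allocator> allocator(
          v8::ArrayBuffer::Allocator::NewDefaultAllocator());
      // Allocate() zero-initializes and, with the sandbox enabled,
      // returns memory inside the V8 memory cage.
      void* caged = allocator->Allocate(size);
      memcpy(caged, data, size);
      return v8::ArrayBuffer::NewBackingStore(
          caged, size,
          [](void* p, size_t length, void*) {
            // The deleter is a plain function pointer, so it cannot
            // capture the allocator; recreate a default allocator here.
            std::unique_ptr<v8::ArrayBuffer::Allocator> alloc(
                v8::ArrayBuffer::Allocator::NewDefaultAllocator());
            alloc->Free(p, length);
          },
          nullptr);
    }

Recreating the default allocator inside the deleter mirrors what the patch
itself does in crypto_util.cc and node_trace_events.cc: NewBackingStore
takes a captureless callback, so the allocator cannot travel with the
lambda.
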
diff --git a/src/api/environment.cc b/src/api/environment.cc
index 60be2353cf0e77287dfda965c820cf36655a7ce5..fe41619f45913fe31a59771c8d1af6cde3b89f66 100644
--- a/src/api/environment.cc
+++ b/src/api/environment.cc
@@ -101,6 +101,14 @@ MaybeLocal<Value> PrepareStackTraceCallback(Local<Context> context,
return result;
}
+NodeArrayBufferAllocator::NodeArrayBufferAllocator() {
+ zero_fill_field_ = static_cast<uint32_t*>(allocator_->Allocate(sizeof(*zero_fill_field_)));
+}
+
+NodeArrayBufferAllocator::~NodeArrayBufferAllocator() {
+ allocator_->Free(zero_fill_field_, sizeof(*zero_fill_field_));
+}
+
void* NodeArrayBufferAllocator::Allocate(size_t size) {
void* ret;
if (zero_fill_field_ || per_process::cli_options->zero_fill_all_buffers)
diff --git a/src/crypto/crypto_util.cc b/src/crypto/crypto_util.cc
index 63d971e1fe6b861e29c12f04563701b01fdfb976..f39652a6f5196531cd78ce74e91076b1b9e970ca 100644
--- a/src/crypto/crypto_util.cc
+++ b/src/crypto/crypto_util.cc
@@ -348,10 +348,35 @@ ByteSource& ByteSource::operator=(ByteSource&& other) noexcept {
return *this;
}
-std::unique_ptr<BackingStore> ByteSource::ReleaseToBackingStore() {
+std::unique_ptr<BackingStore> ByteSource::ReleaseToBackingStore(Environment* env) {
// It's ok for allocated_data_ to be nullptr but
// only if size_ is zero.
CHECK_IMPLIES(size_ > 0, allocated_data_ != nullptr);
+#if defined(V8_ENABLE_SANDBOX)
+ // When V8 sandboxed pointers are enabled, we have to copy into the memory
+ // cage. We still want to ensure we erase the data on free though, so
+ // provide a custom deleter that calls OPENSSL_cleanse.
+ if (!size())
+ return ArrayBuffer::NewBackingStore(env->isolate(), 0);
+ std::unique_ptr<ArrayBuffer::Allocator> allocator(ArrayBuffer::Allocator::NewDefaultAllocator());
+ void* v8_data = allocator->Allocate(size());
+ CHECK(v8_data);
+ memcpy(v8_data, allocated_data_, size());
+ OPENSSL_clear_free(allocated_data_, size());
+ std::unique_ptr<BackingStore> ptr = ArrayBuffer::NewBackingStore(
+ v8_data,
+ size(),
+ [](void* data, size_t length, void*) {
+ OPENSSL_cleanse(data, length);
+ std::unique_ptr<ArrayBuffer::Allocator> allocator(ArrayBuffer::Allocator::NewDefaultAllocator());
+ allocator->Free(data, length);
+ }, nullptr);
+ CHECK(ptr);
+ allocated_data_ = nullptr;
+ data_ = nullptr;
+ size_ = 0;
+ return ptr;
+#else
std::unique_ptr<BackingStore> ptr = ArrayBuffer::NewBackingStore(
allocated_data_,
size(),
@@ -363,10 +388,11 @@ std::unique_ptr<BackingStore> ByteSource::ReleaseToBackingStore() {
data_ = nullptr;
size_ = 0;
return ptr;
+#endif // defined(V8_ENABLE_SANDBOX)
}
Local<ArrayBuffer> ByteSource::ToArrayBuffer(Environment* env) {
- std::unique_ptr<BackingStore> store = ReleaseToBackingStore();
+ std::unique_ptr<BackingStore> store = ReleaseToBackingStore(env);
return ArrayBuffer::New(env->isolate(), std::move(store));
}
@@ -703,6 +729,16 @@ namespace {
// in which case this has the same semantics as
// using OPENSSL_malloc. However, if the secure heap is
// initialized, SecureBuffer will automatically use it.
+#if defined(V8_ENABLE_SANDBOX)
+// When V8 sandboxed pointers are enabled, the secure heap cannot be used as
+// all ArrayBuffers must be allocated inside the V8 memory cage.
+void SecureBuffer(const FunctionCallbackInfo<Value>& args) {
+ CHECK(args[0]->IsUint32());
+ uint32_t len = args[0].As<Uint32>()->Value();
+ Local<ArrayBuffer> buffer = ArrayBuffer::New(args.GetIsolate(), len);
+ args.GetReturnValue().Set(Uint8Array::New(buffer, 0, len));
+}
+#else
void SecureBuffer(const FunctionCallbackInfo<Value>& args) {
CHECK(args[0]->IsUint32());
Environment* env = Environment::GetCurrent(args);
@@ -724,6 +760,7 @@ void SecureBuffer(const FunctionCallbackInfo<Value>& args) {
Local<ArrayBuffer> buffer = ArrayBuffer::New(env->isolate(), store);
args.GetReturnValue().Set(Uint8Array::New(buffer, 0, len));
}
+#endif // defined(V8_ENABLE_SANDBOX)
void SecureHeapUsed(const FunctionCallbackInfo<Value>& args) {
#ifndef OPENSSL_IS_BORINGSSL
diff --git a/src/crypto/crypto_util.h b/src/crypto/crypto_util.h
index 0ae2946e5e5884d6d095c039a3b4dc537e0f29a2..8c06e81de3ae93d82bf7eaf1bada77164a130695 100644
--- a/src/crypto/crypto_util.h
+++ b/src/crypto/crypto_util.h
@@ -280,7 +280,7 @@ class ByteSource {
// Creates a v8::BackingStore that takes over responsibility for
// any allocated data. The ByteSource will be reset with size = 0
// after being called.
- std::unique_ptr<v8::BackingStore> ReleaseToBackingStore();
+ std::unique_ptr<v8::BackingStore> ReleaseToBackingStore(Environment* env);
v8::Local<v8::ArrayBuffer> ToArrayBuffer(Environment* env);
diff --git a/src/node_i18n.cc b/src/node_i18n.cc
index d45325954d980724f80d49298bbe837197237a9b..ccea18080142bd9cba3765dbbec61c2a63406667 100644
--- a/src/node_i18n.cc
+++ b/src/node_i18n.cc
@@ -105,7 +105,7 @@ namespace {
template <typename T>
MaybeLocal<Object> ToBufferEndian(Environment* env, MaybeStackBuffer<T>* buf) {
- MaybeLocal<Object> ret = Buffer::New(env, buf);
+ MaybeLocal<Object> ret = Buffer::Copy(env, reinterpret_cast<char*>(buf->out()), buf->length() * sizeof(T));
if (ret.IsEmpty())
return ret;
diff --git a/src/node_internals.h b/src/node_internals.h
index e04fadd7f83e52fe965d8c73916a56f60425ba3d..5dff80ee287256ba40bfa496df6db60a18fbb9d7 100644
--- a/src/node_internals.h
+++ b/src/node_internals.h
@@ -118,7 +118,9 @@ v8::Maybe<bool> InitializePrimordials(v8::Local<v8::Context> context);
class NodeArrayBufferAllocator : public ArrayBufferAllocator {
public:
- inline uint32_t* zero_fill_field() { return &zero_fill_field_; }
+ NodeArrayBufferAllocator();
+ ~NodeArrayBufferAllocator() override;
+ inline uint32_t* zero_fill_field() { return zero_fill_field_; }
void* Allocate(size_t size) override; // Defined in src/node.cc
void* AllocateUninitialized(size_t size) override;
@@ -136,7 +138,7 @@ class NodeArrayBufferAllocator : public ArrayBufferAllocator {
}
private:
- uint32_t zero_fill_field_ = 1; // Boolean but exposed as uint32 to JS land.
+ uint32_t* zero_fill_field_ = nullptr; // Boolean but exposed as uint32 to JS land.
std::atomic<size_t> total_mem_usage_ {0};
// Delegate to V8's allocator for compatibility with the V8 memory cage.
diff --git a/src/node_serdes.cc b/src/node_serdes.cc
index 6698a1df81cb4e0947c86fb30c2d77fca8e2d9d1..dad297652b347819805b09fbfd869f1d037e31c1 100644
--- a/src/node_serdes.cc
+++ b/src/node_serdes.cc
@@ -29,6 +29,11 @@ using v8::ValueSerializer;
namespace serdes {
+v8::ArrayBuffer::Allocator* GetAllocator() {
+ static v8::ArrayBuffer::Allocator* allocator = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
+ return allocator;
+}
+
class SerializerContext : public BaseObject,
public ValueSerializer::Delegate {
public:
@@ -37,10 +42,15 @@ class SerializerContext : public BaseObject,
~SerializerContext() override = default;
+ // v8::ValueSerializer::Delegate
void ThrowDataCloneError(Local<String> message) override;
Maybe<bool> WriteHostObject(Isolate* isolate, Local<Object> object) override;
Maybe<uint32_t> GetSharedArrayBufferId(
Isolate* isolate, Local<SharedArrayBuffer> shared_array_buffer) override;
+ void* ReallocateBufferMemory(void* old_buffer,
+ size_t old_length,
+ size_t* new_length) override;
+ void FreeBufferMemory(void* buffer) override;
static void SetTreatArrayBufferViewsAsHostObjects(
const FunctionCallbackInfo<Value>& args);
@@ -61,6 +71,7 @@ class SerializerContext : public BaseObject,
private:
ValueSerializer serializer_;
+ size_t last_length_ = 0;
};
class DeserializerContext : public BaseObject,
@@ -144,6 +155,24 @@ Maybe<uint32_t> SerializerContext::GetSharedArrayBufferId(
return id.ToLocalChecked()->Uint32Value(env()->context());
}
+void* SerializerContext::ReallocateBufferMemory(void* old_buffer,
+ size_t requested_size,
+ size_t* new_length) {
+ *new_length = std::max(static_cast<size_t>(4096), requested_size);
+ if (old_buffer) {
+ void* ret = GetAllocator()->Reallocate(old_buffer, last_length_, *new_length);
+ last_length_ = *new_length;
+ return ret;
+ } else {
+ last_length_ = *new_length;
+ return GetAllocator()->Allocate(*new_length);
+ }
+}
+
+void SerializerContext::FreeBufferMemory(void* buffer) {
+ GetAllocator()->Free(buffer, last_length_);
+}
+
Maybe<bool> SerializerContext::WriteHostObject(Isolate* isolate,
Local<Object> input) {
MaybeLocal<Value> ret;
@@ -209,9 +238,14 @@ void SerializerContext::ReleaseBuffer(const FunctionCallbackInfo<Value>& args) {
// Note: Both ValueSerializer and this Buffer::New() variant use malloc()
// as the underlying allocator.
std::pair<uint8_t*, size_t> ret = ctx->serializer_.Release();
- auto buf = Buffer::New(ctx->env(),
- reinterpret_cast<char*>(ret.first),
- ret.second);
+ std::unique_ptr<v8::BackingStore> bs =
+ v8::ArrayBuffer::NewBackingStore(reinterpret_cast<char*>(ret.first), ret.second,
+ [](void* data, size_t length, void* deleter_data) {
+ if (data) GetAllocator()->Free(reinterpret_cast<char*>(data), length);
+ }, nullptr);
+ Local<ArrayBuffer> ab = v8::ArrayBuffer::New(ctx->env()->isolate(), std::move(bs));
+
+ auto buf = Buffer::New(ctx->env(), ab, 0, ret.second);
if (!buf.IsEmpty()) {
args.GetReturnValue().Set(buf.ToLocalChecked());
diff --git a/src/node_trace_events.cc b/src/node_trace_events.cc
index c4960ee1427e3b29b873135a815a7a09bedcfb73..2c6d8cdeb37f7dce9f29b8f3b260036ae23e6fb5 100644
--- a/src/node_trace_events.cc
+++ b/src/node_trace_events.cc
@@ -132,12 +132,28 @@ static void GetCategoryEnabledBuffer(const FunctionCallbackInfo<Value>& args) {
const uint8_t* enabled_pointer =
TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_name.out());
uint8_t* enabled_pointer_cast = const_cast<uint8_t*>(enabled_pointer);
+ uint8_t size = sizeof(*enabled_pointer_cast);
+#if defined(V8_ENABLE_SANDBOX)
+ std::unique_ptr<ArrayBuffer::Allocator> allocator(ArrayBuffer::Allocator::NewDefaultAllocator());
+ void* v8_data = allocator->Allocate(size);
+ CHECK(v8_data);
+ memcpy(v8_data, enabled_pointer_cast, size);
+ std::unique_ptr<BackingStore> bs = ArrayBuffer::NewBackingStore(
+ v8_data,
+ size,
+ [](void* data, size_t length, void*) {
+ std::unique_ptr<ArrayBuffer::Allocator> allocator(ArrayBuffer::Allocator::NewDefaultAllocator());
+ allocator->Free(data, length);
+ }, nullptr);
+#else
std::unique_ptr<BackingStore> bs = ArrayBuffer::NewBackingStore(
enabled_pointer_cast,
- sizeof(*enabled_pointer_cast),
+ size,
[](void*, size_t, void*) {},
nullptr);
+#endif
+
auto ab = ArrayBuffer::New(isolate, std::move(bs));
v8::Local<Uint8Array> u8 = v8::Uint8Array::New(ab, 0, 1);