diff --git a/patches/chromium/.patches b/patches/chromium/.patches index 31fe6458f7b2..f327f6100f27 100644 --- a/patches/chromium/.patches +++ b/patches/chromium/.patches @@ -145,3 +145,4 @@ wayland_support_outgoing_dnd_sessions_with_no_offered_mime_types.patch cherry-pick-3a6ff45cc3f4.patch cherry-pick-a51e7ebb7663.patch cherry-pick-f3300abe2fcd.patch +remove_persistentmemoryallocator_getallocsize.patch diff --git a/patches/chromium/cherry-pick-3a6ff45cc3f4.patch b/patches/chromium/cherry-pick-3a6ff45cc3f4.patch index 16c02539b2c6..87a4f2714330 100644 --- a/patches/chromium/cherry-pick-3a6ff45cc3f4.patch +++ b/patches/chromium/cherry-pick-3a6ff45cc3f4.patch @@ -1,7 +1,7 @@ -From 3a6ff45cc3f48a359772f81c512c512b4f2d2643 Mon Sep 17 00:00:00 2001 +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Stefan Zager Date: Sat, 14 Dec 2024 11:06:00 -0800 -Subject: [PATCH] [M130] Prevent ImageData from being reclaimed while in use +Subject: Prevent ImageData from being reclaimed while in use Cherry-picked from: https://chromium-review.googlesource.com/c/chromium/src/+/5990752 @@ -14,13 +14,12 @@ Owners-Override: Prudhvikumar Bommana Commit-Queue: Prudhvikumar Bommana Cr-Commit-Position: refs/branch-heads/6723@{#2713} Cr-Branched-From: 985f2961df230630f9cbd75bd6fe463009855a11-refs/heads/main@{#1356013} ---- diff --git a/cc/tiles/gpu_image_decode_cache.cc b/cc/tiles/gpu_image_decode_cache.cc -index df1d24b..39fa3ff 100644 +index 39f0a6e1ad2da6056a702ac2e920a0adb5d153f1..d9b8130af2b2830903cf1b9d91e3f8c1ccaff3dd 100644 --- a/cc/tiles/gpu_image_decode_cache.cc +++ b/cc/tiles/gpu_image_decode_cache.cc -@@ -2401,6 +2401,9 @@ +@@ -2398,6 +2398,9 @@ void GpuImageDecodeCache::DecodeImageIfNecessary( image_data->decode.ResetData(); @@ -30,7 +29,7 @@ index df1d24b..39fa3ff 100644 // Decode the image into `aux_image_data` while the lock is not held. DecodedAuxImageData aux_image_data[kAuxImageCount]; { -@@ -2728,6 +2731,9 @@ +@@ -2725,6 +2728,9 @@ void GpuImageDecodeCache::UploadImageIfNecessary_GpuCpu_YUVA( sk_sp uploaded_v_image = image_data->decode.image(2, AuxImage::kDefault); @@ -40,7 +39,7 @@ index df1d24b..39fa3ff 100644 // For kGpu, we upload and color convert (if necessary). if (image_data->mode == DecodedDataMode::kGpu) { DCHECK(!use_transfer_cache_); -@@ -2815,6 +2821,9 @@ +@@ -2812,6 +2818,9 @@ void GpuImageDecodeCache::UploadImageIfNecessary_GpuCpu_RGBA( DCHECK(!use_transfer_cache_); DCHECK(!image_data->info.yuva.has_value()); diff --git a/patches/chromium/cherry-pick-a51e7ebb7663.patch b/patches/chromium/cherry-pick-a51e7ebb7663.patch index 583e2fda3a9f..a69c689708c5 100644 --- a/patches/chromium/cherry-pick-a51e7ebb7663.patch +++ b/patches/chromium/cherry-pick-a51e7ebb7663.patch @@ -1,7 +1,7 @@ -From a51e7ebb7663b40ed070e91669f69c64fb9179d9 Mon Sep 17 00:00:00 2001 +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Guido Urdaneta Date: Wed, 18 Dec 2024 15:21:59 -0800 -Subject: [PATCH] [M126-LTS][VideoCaptureManager] Replace raw pointers with scoped_refptr +Subject: Replace raw pointers with scoped_refptr VCM used VideoCaptureController raw pointers in a number of places, including as a field in VCM::CaptureDeviceStartRequest. 
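The hazard that cherry-pick addresses is a queued start request outliving the VideoCaptureController it points at. Below is a minimal sketch of the before/after ownership shape, using std::shared_ptr as a stand-in for Chromium's scoped_refptr; the Controller/StartRequest names are illustrative only, not the real classes.

#include <memory>
#include <string>
#include <vector>

struct Controller {
  std::string device_id;
};

// Before: the pending request holds a raw pointer, so destroying the
// controller while the request is still queued leaves a dangling pointer.
struct StartRequestUnsafe {
  Controller* controller = nullptr;  // may dangle
};

// After: the pending request shares ownership, so the controller stays alive
// at least as long as any queued request that refers to it.
struct StartRequestSafe {
  std::shared_ptr<Controller> controller;
};

int main() {
  std::vector<StartRequestSafe> queue;
  auto controller = std::make_shared<Controller>();
  controller->device_id = "cam0";
  queue.push_back({controller});
  controller.reset();  // the queue still keeps the Controller alive
  return queue.front().controller->device_id == "cam0" ? 0 : 1;
}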
@@ -22,10 +22,9 @@ Reviewed-by: Guido Urdaneta Commit-Queue: Gyuyoung Kim (xWF) Cr-Commit-Position: refs/branch-heads/6478@{#2009} Cr-Branched-From: e6143acc03189c5e52959545b110d6d17ecd5286-refs/heads/main@{#1300313} ---- diff --git a/content/browser/renderer_host/media/video_capture_manager.cc b/content/browser/renderer_host/media/video_capture_manager.cc -index 3db89a04..11b6caf 100644 +index 4e3ccc0050d63466d7dcbd221c4f580939668ce0..fbc168cfd26d53711dd16f337287110a548a2215 100644 --- a/content/browser/renderer_host/media/video_capture_manager.cc +++ b/content/browser/renderer_host/media/video_capture_manager.cc @@ -15,6 +15,7 @@ @@ -36,7 +35,7 @@ index 3db89a04..11b6caf 100644 #include "base/metrics/histogram_functions.h" #include "base/observer_list.h" #include "base/ranges/algorithm.h" -@@ -61,12 +62,14 @@ +@@ -61,12 +62,14 @@ namespace content { class VideoCaptureManager::CaptureDeviceStartRequest { public: CaptureDeviceStartRequest( @@ -53,7 +52,7 @@ index 3db89a04..11b6caf 100644 const base::UnguessableToken& session_id() const { return session_id_; } media::VideoCaptureParams params() const { return params_; } -@@ -76,7 +79,7 @@ +@@ -76,7 +79,7 @@ class VideoCaptureManager::CaptureDeviceStartRequest { } private: @@ -62,7 +61,7 @@ index 3db89a04..11b6caf 100644 const base::UnguessableToken session_id_; const media::VideoCaptureParams params_; mojo::PendingRemote -@@ -84,12 +87,12 @@ +@@ -84,12 +87,12 @@ class VideoCaptureManager::CaptureDeviceStartRequest { }; VideoCaptureManager::CaptureDeviceStartRequest::CaptureDeviceStartRequest( @@ -77,7 +76,7 @@ index 3db89a04..11b6caf 100644 session_id_(session_id), params_(params), video_effects_processor_(std::move(video_effects_processor)) {} -@@ -258,14 +261,15 @@ +@@ -258,14 +261,15 @@ void VideoCaptureManager::ApplySubCaptureTarget( void VideoCaptureManager::QueueStartDevice( const media::VideoCaptureSessionId& session_id, @@ -96,7 +95,7 @@ index 3db89a04..11b6caf 100644 if (device_start_request_queue_.size() == 1) ProcessDeviceStartRequestQueue(); } -@@ -311,7 +315,8 @@ +@@ -311,7 +315,8 @@ void VideoCaptureManager::ProcessDeviceStartRequestQueue() { if (request == device_start_request_queue_.end()) return; @@ -106,7 +105,7 @@ index 3db89a04..11b6caf 100644 EmitLogMessage("VideoCaptureManager::ProcessDeviceStartRequestQueue", 3); // The unit test VideoCaptureManagerTest.OpenNotExisting requires us to fail -@@ -329,7 +334,7 @@ +@@ -329,7 +334,7 @@ void VideoCaptureManager::ProcessDeviceStartRequestQueue() { GetDeviceInfoById(controller->device_id()); if (!device_info) { OnDeviceLaunchFailed( @@ -115,7 +114,7 @@ index 3db89a04..11b6caf 100644 media::VideoCaptureError:: kVideoCaptureManagerProcessDeviceStartQueueDeviceInfoNotFound); return; -@@ -350,7 +355,7 @@ +@@ -350,7 +355,7 @@ void VideoCaptureManager::ProcessDeviceStartRequestQueue() { base::BindOnce([](scoped_refptr, scoped_refptr) {}, scoped_refptr(this), @@ -124,7 +123,7 @@ index 3db89a04..11b6caf 100644 request->TakeVideoEffectsProcessor()); } -@@ -434,7 +439,7 @@ +@@ -434,7 +439,7 @@ void VideoCaptureManager::ConnectClient( EmitLogMessage(string_stream.str(), 1); } @@ -133,7 +132,7 @@ index 3db89a04..11b6caf 100644 GetOrCreateController(session_id, params); if (!controller) { std::move(done_cb).Run(nullptr); -@@ -908,7 +913,8 @@ +@@ -908,7 +913,8 @@ media::VideoCaptureDeviceInfo* VideoCaptureManager::GetDeviceInfoById( return nullptr; } @@ -143,7 +142,7 @@ index 3db89a04..11b6caf 100644 const media::VideoCaptureSessionId& capture_session_id, const media::VideoCaptureParams& 
params) { DCHECK_CURRENTLY_ON(BrowserThread::IO); -@@ -930,10 +936,12 @@ +@@ -930,10 +936,12 @@ VideoCaptureController* VideoCaptureManager::GetOrCreateController( return existing_device; } @@ -161,10 +160,10 @@ index 3db89a04..11b6caf 100644 } diff --git a/content/browser/renderer_host/media/video_capture_manager.h b/content/browser/renderer_host/media/video_capture_manager.h -index a93c6bb..b88052a4 100644 +index a93c6bbb9677113402831378a4d9fa38e04408e2..b88052a4e9f52c0e969e068b8af3bbe2c7fb2d49 100644 --- a/content/browser/renderer_host/media/video_capture_manager.h +++ b/content/browser/renderer_host/media/video_capture_manager.h -@@ -297,7 +297,7 @@ +@@ -297,7 +297,7 @@ class CONTENT_EXPORT VideoCaptureManager // Finds a VideoCaptureController for the indicated |capture_session_id|, // creating a fresh one if necessary. Returns nullptr if said // |capture_session_id| is invalid. @@ -173,7 +172,7 @@ index a93c6bb..b88052a4 100644 const media::VideoCaptureSessionId& capture_session_id, const media::VideoCaptureParams& params); -@@ -309,7 +309,7 @@ +@@ -309,7 +309,7 @@ class CONTENT_EXPORT VideoCaptureManager // another request pending start. void QueueStartDevice( const media::VideoCaptureSessionId& session_id, diff --git a/patches/chromium/cherry-pick-f3300abe2fcd.patch b/patches/chromium/cherry-pick-f3300abe2fcd.patch index 8b7954c90925..24d9463709ee 100644 --- a/patches/chromium/cherry-pick-f3300abe2fcd.patch +++ b/patches/chromium/cherry-pick-f3300abe2fcd.patch @@ -1,7 +1,7 @@ -From f3300abe2fcd0164794d7a782cc221d10c17f322 Mon Sep 17 00:00:00 2001 +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Yoshisato Yanagisawa -Date: Mon, 06 Jan 2025 05:34:49 -0800 -Subject: [PATCH] [M130] Make AcceptLanguagesWatcher a weak persistent object +Date: Mon, 6 Jan 2025 05:34:49 -0800 +Subject: Make AcceptLanguagesWatcher a weak persistent object DedicatedWorkerOrSharedWorkerFetchContext keeps on having a pointer to the AcceptLanguagesWatcher as a raw_ptr. Even if the implementing @@ -28,13 +28,12 @@ Auto-Submit: Daniel Yip Owners-Override: Daniel Yip Cr-Commit-Position: refs/branch-heads/6723@{#2761} Cr-Branched-From: 985f2961df230630f9cbd75bd6fe463009855a11-refs/heads/main@{#1356013} ---- diff --git a/third_party/blink/public/platform/web_worker_fetch_context.h b/third_party/blink/public/platform/web_worker_fetch_context.h -index 6f2a2e9..cf8bfce4 100644 +index 83f6773cf4d8542d042b74697e36aa2ea3329bbf..c41e1aa452994a66c58e40c8e936134dd6375071 100644 --- a/third_party/blink/public/platform/web_worker_fetch_context.h +++ b/third_party/blink/public/platform/web_worker_fetch_context.h -@@ -33,19 +33,12 @@ +@@ -33,19 +33,12 @@ class SiteForCookies; namespace blink { @@ -56,10 +55,10 @@ index 6f2a2e9..cf8bfce4 100644 // passed to a worker (dedicated, shared and service worker) and initialized on // the worker thread by InitializeOnWorkerThread(). 
It contains information diff --git a/third_party/blink/renderer/core/workers/worker_navigator.cc b/third_party/blink/renderer/core/workers/worker_navigator.cc -index 344382b..a4159a4 100644 +index 344382b6f661b4cb8b0d78b070bc0d5c603d28f8..a4159a402ed48f2df233bfc1115e4b0438e1aaaf 100644 --- a/third_party/blink/renderer/core/workers/worker_navigator.cc +++ b/third_party/blink/renderer/core/workers/worker_navigator.cc -@@ -61,4 +61,9 @@ +@@ -61,4 +61,9 @@ void WorkerNavigator::NotifyUpdate() { *Event::Create(event_type_names::kLanguagechange)); } @@ -70,7 +69,7 @@ index 344382b..a4159a4 100644 + } // namespace blink diff --git a/third_party/blink/renderer/core/workers/worker_navigator.h b/third_party/blink/renderer/core/workers/worker_navigator.h -index ea07a96..ab622f8e 100644 +index ea07a96390fbcf06853d80b7b20cf50128494e9a..ab622f8ebc6a5f68ceb9f867876b6bf696d3fc30 100644 --- a/third_party/blink/renderer/core/workers/worker_navigator.h +++ b/third_party/blink/renderer/core/workers/worker_navigator.h @@ -29,6 +29,7 @@ @@ -81,7 +80,7 @@ index ea07a96..ab622f8e 100644 #include "third_party/blink/renderer/platform/wtf/text/wtf_string.h" namespace blink { -@@ -46,6 +47,9 @@ +@@ -46,6 +47,9 @@ class CORE_EXPORT WorkerNavigator final : public NavigatorBase, // AcceptLanguagesWatcher override void NotifyUpdate() override; @@ -92,7 +91,7 @@ index ea07a96..ab622f8e 100644 } // namespace blink diff --git a/third_party/blink/renderer/modules/service_worker/web_service_worker_fetch_context_impl.cc b/third_party/blink/renderer/modules/service_worker/web_service_worker_fetch_context_impl.cc -index 0815877..4930962 100644 +index 585c6ab98495f168cc731afcdc2137156a32c0b4..cac6af5b89fee1c23c6f56ce0aeee4376647cf6e 100644 --- a/third_party/blink/renderer/modules/service_worker/web_service_worker_fetch_context_impl.cc +++ b/third_party/blink/renderer/modules/service_worker/web_service_worker_fetch_context_impl.cc @@ -18,6 +18,7 @@ @@ -103,7 +102,7 @@ index 0815877..4930962 100644 #include "third_party/blink/renderer/platform/loader/fetch/url_loader/url_loader_factory.h" #include "third_party/blink/renderer/platform/loader/internet_disconnected_url_loader.h" -@@ -226,9 +227,12 @@ +@@ -226,9 +227,12 @@ void WebServiceWorkerFetchContextImpl::UpdateSubresourceLoaderFactories( void WebServiceWorkerFetchContextImpl::NotifyUpdate( const RendererPreferences& new_prefs) { @@ -120,7 +119,7 @@ index 0815877..4930962 100644 } diff --git a/third_party/blink/renderer/modules/service_worker/web_service_worker_fetch_context_impl.h b/third_party/blink/renderer/modules/service_worker/web_service_worker_fetch_context_impl.h -index a7c897de..c2f1c9d 100644 +index c59acba074327eb609ae40c069873272a3aa0e71..dad815728a335e5e0de77b95f0dcae871fb6a9ce 100644 --- a/third_party/blink/renderer/modules/service_worker/web_service_worker_fetch_context_impl.h +++ b/third_party/blink/renderer/modules/service_worker/web_service_worker_fetch_context_impl.h @@ -6,16 +6,16 @@ @@ -142,7 +141,7 @@ index a7c897de..c2f1c9d 100644 #include "third_party/blink/renderer/platform/weborigin/kurl.h" #include "third_party/blink/renderer/platform/wtf/text/wtf_string.h" #include "third_party/blink/renderer/platform/wtf/vector.h" -@@ -135,7 +135,7 @@ +@@ -135,7 +135,7 @@ class BLINK_EXPORT WebServiceWorkerFetchContextImpl final // This is owned by ThreadedMessagingProxyBase on the main thread. 
raw_ptr terminate_sync_load_event_ = nullptr; @@ -152,10 +151,10 @@ index a7c897de..c2f1c9d 100644 Vector cors_exempt_header_list_; bool is_offline_mode_ = false; diff --git a/third_party/blink/renderer/platform/BUILD.gn b/third_party/blink/renderer/platform/BUILD.gn -index 5b8e391..12da20f 100644 +index ab2baebf60fe9bdf9502e404c99dd8ddac79c24e..fc64a15df563830e495f3589823116c18f3ebc8c 100644 --- a/third_party/blink/renderer/platform/BUILD.gn +++ b/third_party/blink/renderer/platform/BUILD.gn -@@ -341,6 +341,7 @@ +@@ -335,6 +335,7 @@ component("platform") { output_name = "blink_platform" sources = [ @@ -165,7 +164,7 @@ index 5b8e391..12da20f 100644 "animation/compositor_animation.cc", diff --git a/third_party/blink/renderer/platform/accept_languages_watcher.h b/third_party/blink/renderer/platform/accept_languages_watcher.h new file mode 100644 -index 0000000..7fd5de07f +index 0000000000000000000000000000000000000000..7fd5de07fb26863deab3f921f678f0628f496f2d --- /dev/null +++ b/third_party/blink/renderer/platform/accept_languages_watcher.h @@ -0,0 +1,22 @@ @@ -192,10 +191,10 @@ index 0000000..7fd5de07f + +#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_ACCEPT_LANGUAGES_WATCHER_H_ diff --git a/third_party/blink/renderer/platform/loader/fetch/url_loader/DEPS b/third_party/blink/renderer/platform/loader/fetch/url_loader/DEPS -index c8a92c06..7886b02 100644 +index c8a92c0641ddbe972239acbc44593058ddea7159..7886b02241bd44edfeea7a5af7a0d8dd545308f3 100644 --- a/third_party/blink/renderer/platform/loader/fetch/url_loader/DEPS +++ b/third_party/blink/renderer/platform/loader/fetch/url_loader/DEPS -@@ -28,4 +28,7 @@ +@@ -28,4 +28,7 @@ specific_include_rules = { "web_url_loader_unittest.cc": [ "+net/test/cert_test_util.h" ], @@ -204,7 +203,7 @@ index c8a92c06..7886b02 100644 + ], } diff --git a/third_party/blink/renderer/platform/loader/fetch/url_loader/dedicated_or_shared_worker_fetch_context_impl.cc b/third_party/blink/renderer/platform/loader/fetch/url_loader/dedicated_or_shared_worker_fetch_context_impl.cc -index cc1954f6..c9f96ff 100644 +index 723e6fb730bab1571e87dd2544104e1a80e463c4..17df81491e20181d6b2ea57397e63459ba3f75ea 100644 --- a/third_party/blink/renderer/platform/loader/fetch/url_loader/dedicated_or_shared_worker_fetch_context_impl.cc +++ b/third_party/blink/renderer/platform/loader/fetch/url_loader/dedicated_or_shared_worker_fetch_context_impl.cc @@ -28,6 +28,7 @@ @@ -215,7 +214,7 @@ index cc1954f6..c9f96ff 100644 #include "third_party/blink/renderer/platform/loader/fetch/url_loader/url_loader.h" #include "third_party/blink/renderer/platform/loader/fetch/url_loader/url_loader_factory.h" #include "url/url_constants.h" -@@ -605,9 +606,13 @@ +@@ -600,9 +601,13 @@ void DedicatedOrSharedWorkerFetchContextImpl::UpdateSubresourceLoaderFactories( void DedicatedOrSharedWorkerFetchContextImpl::NotifyUpdate( const RendererPreferences& new_prefs) { @@ -233,7 +232,7 @@ index cc1954f6..c9f96ff 100644 for (auto& watcher : child_preference_watchers_) watcher->NotifyUpdate(new_prefs); diff --git a/third_party/blink/renderer/platform/loader/fetch/url_loader/dedicated_or_shared_worker_fetch_context_impl.h b/third_party/blink/renderer/platform/loader/fetch/url_loader/dedicated_or_shared_worker_fetch_context_impl.h -index b95a25fe..d0387cf 100644 +index 113e65b3154981dd16f0e8839ad8dc9add33d392..33814865741bd0d1e2b73142f384f7024e119ca6 100644 --- a/third_party/blink/renderer/platform/loader/fetch/url_loader/dedicated_or_shared_worker_fetch_context_impl.h +++ 
b/third_party/blink/renderer/platform/loader/fetch/url_loader/dedicated_or_shared_worker_fetch_context_impl.h @@ -23,6 +23,7 @@ @@ -244,7 +243,7 @@ index b95a25fe..d0387cf 100644 #include "third_party/blink/renderer/platform/wtf/casting.h" #include "third_party/blink/renderer/platform/wtf/text/wtf_string.h" #include "third_party/blink/renderer/platform/wtf/vector.h" -@@ -301,7 +302,7 @@ +@@ -300,7 +301,7 @@ class BLINK_PLATFORM_EXPORT DedicatedOrSharedWorkerFetchContextImpl final std::unique_ptr weak_wrapper_resource_load_info_notifier_; diff --git a/patches/chromium/remove_persistentmemoryallocator_getallocsize.patch b/patches/chromium/remove_persistentmemoryallocator_getallocsize.patch new file mode 100644 index 000000000000..5fa83a777df3 --- /dev/null +++ b/patches/chromium/remove_persistentmemoryallocator_getallocsize.patch @@ -0,0 +1,1334 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Roger McFarlane +Date: Tue, 17 Dec 2024 12:20:05 -0800 +Subject: Remove PersistentMemoryAllocator::GetAllocSize() + +This CL removes PersistentMemoryAllocator::GetAllocSize() in favor +of allowing various other API entry points to return the alloc size. +This mitigates potential TOCTOU errors where the size of an alloc +is validated by one API then separately fetched in another call. The +size could otherwise be manipulated in between initial validation and +the subsequent fetch. + +(cherry picked from commit 23479ae0d3332f5525cfd9491137fc6c0ffcb46a) + +Bug: 378623799 +Change-Id: I8021cf4c07f1a96172deb2a252326e9ffa525798 +Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6025612 +Reviewed-by: Alexei Svitkine +Commit-Queue: Roger McFarlane +Cr-Original-Commit-Position: refs/heads/main@{#1394492} +Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6098919 +Auto-Submit: Roger McFarlane +Commit-Queue: Luc Nguyen +Reviewed-by: Luc Nguyen +Cr-Commit-Position: refs/branch-heads/6834@{#2335} +Cr-Branched-From: 47a3549fac11ee8cb7be6606001ede605b302b9f-refs/heads/main@{#1381561} + +diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc +index 21d60b117a18dfba9c50bae62fe9c0210169f0bf..77ba8087873b62dca51124126e24bcb78aad2ccf 100644 +--- a/base/metrics/field_trial.cc ++++ b/base/metrics/field_trial.cc +@@ -124,7 +124,7 @@ void PickleFieldTrial(const FieldTrial::PickleState& trial_state, + } + + // Returns the boundary value for comparing against the FieldTrial's added +-// groups for a given |divisor| (total probability) and |entropy_value|. ++// groups for a given `divisor` (total probability) and `entropy_value`. + FieldTrial::Probability GetGroupBoundaryValue( + FieldTrial::Probability divisor, + double entropy_value) { +@@ -138,7 +138,7 @@ FieldTrial::Probability GetGroupBoundaryValue( + const double kEpsilon = 1e-8; + const FieldTrial::Probability result = + static_cast(divisor * entropy_value + kEpsilon); +- // Ensure that adding the epsilon still results in a value < |divisor|. ++ // Ensure that adding the epsilon still results in a value < `divisor`. 
+ return std::min(result, divisor - 1); + } + +@@ -259,7 +259,7 @@ void FieldTrial::AppendGroup(const std::string& name, + if (forced_) { + DCHECK(!group_name_.empty()); + if (name == group_name_) { +- // Note that while |group_| may be equal to |kDefaultGroupNumber| on the ++ // Note that while `group_` may be equal to `kDefaultGroupNumber` on the + // forced trial, it will not have the same value as the default group + // number returned from the non-forced |FactoryGetFieldTrial()| call, + // which takes care to ensure that this does not happen. +@@ -326,7 +326,7 @@ bool FieldTrial::IsOverridden() const { + void FieldTrial::EnableBenchmarking() { + // We don't need to see field trials created via CreateFieldTrial() for + // benchmarking, because such field trials have only a single group and are +- // not affected by randomization that |enable_benchmarking_| would disable. ++ // not affected by randomization that `enable_benchmarking_` would disable. + DCHECK_EQ(0u, FieldTrialList::GetRandomizedFieldTrialCount()); + enable_benchmarking_ = true; + } +@@ -453,7 +453,7 @@ void FieldTrial::FinalizeGroupChoice() { + if (group_ != kNotFinalized) + return; + accumulated_group_probability_ = divisor_; +- // Here it's OK to use |kDefaultGroupNumber| since we can't be forced and not ++ // Here it's OK to use `kDefaultGroupNumber` since we can't be forced and not + // finalized. + DCHECK(!forced_); + SetGroupChoice(default_group_name_, kDefaultGroupNumber); +@@ -807,7 +807,7 @@ FieldTrial* FieldTrialList::CreateFieldTrial(std::string_view name, + field_trial = new FieldTrial(name, kTotalProbability, group_name, 0, + is_low_anonymity, is_overridden); + // The group choice will be finalized in this method. So +- // |is_randomized_trial| should be false. ++ // `is_randomized_trial` should be false. + FieldTrialList::Register(field_trial, /*is_randomized_trial=*/false); + // Force the trial, which will also finalize the group choice. + field_trial->SetForced(); +@@ -910,12 +910,12 @@ bool FieldTrialList::GetParamsFromSharedMemory( + if (!field_trial->ref_) + return false; + ++ size_t allocated_size = 0; + const FieldTrial::FieldTrialEntry* entry = + global_->field_trial_allocator_->GetAsObject( +- field_trial->ref_); ++ field_trial->ref_, &allocated_size); ++ CHECK(entry); + +- size_t allocated_size = +- global_->field_trial_allocator_->GetAllocSize(field_trial->ref_); + uint64_t actual_size = + sizeof(FieldTrial::FieldTrialEntry) + entry->pickle_size; + if (allocated_size < actual_size) +diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc +index 785d24a7e0aae68d309f334488d9e81b7f34eeb3..bb82d15ab105cc22faeb2594afb2ff5d435adec1 100644 +--- a/base/metrics/persistent_histogram_allocator.cc ++++ b/base/metrics/persistent_histogram_allocator.cc +@@ -89,13 +89,13 @@ std::unique_ptr CreateRangesFromData( + } + + // Calculate the number of bytes required to store all of a histogram's +-// "counts". This will return zero (0) if |bucket_count| is not valid. ++// "counts". This will return zero (0) if `bucket_count` is not valid. + size_t CalculateRequiredCountsBytes(size_t bucket_count) { + // 2 because each "sample count" also requires a backup "logged count" + // used for calculating the delta during snapshot operations. 
+ const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount); + +- // If the |bucket_count| is such that it would overflow the return type, ++ // If the `bucket_count` is such that it would overflow the return type, + // perhaps as the result of a malicious actor, then return zero to + // indicate the problem to the caller. + if (bucket_count > std::numeric_limits::max() / kBytesPerBucket) +@@ -190,7 +190,7 @@ std::vector + PersistentSparseHistogramDataManager::LoadRecords( + PersistentSampleMapRecords* sample_map_records, + std::optional until_value) { +- // DataManager must be locked in order to access the |sample_records_| ++ // DataManager must be locked in order to access the `sample_records_` + // vectors. + base::AutoLock auto_lock(lock_); + +@@ -236,7 +236,7 @@ PersistentSparseHistogramDataManager::LoadRecords( + } + + // Return all references found that have not yet been seen by +- // |sample_map_records|, up until |until_value| (if applicable). ++ // `sample_map_records`, up until `until_value` (if applicable). + std::vector new_references; + CHECK_GE(found_records.size(), sample_map_records->seen_); + auto new_found_records = base::make_span(found_records) +@@ -244,9 +244,9 @@ PersistentSparseHistogramDataManager::LoadRecords( + new_references.reserve(new_found_records.size()); + for (const auto& new_record : new_found_records) { + new_references.push_back(new_record.reference); +- // Maybe references after |until_value| were found. Stop here immediately in ++ // Maybe references after `until_value` were found. Stop here immediately in + // such a case, since the caller will not expect any more samples after +- // |until_value|. ++ // `until_value`. + if (until_value.has_value() && new_record.value == until_value.value()) { + break; + } +@@ -335,9 +335,9 @@ std::unique_ptr PersistentHistogramAllocator::GetHistogram( + // count data (while these must reference the persistent counts) and always + // add it to the local list of known histograms (while these may be simple + // references to histograms in other processes). ++ size_t length = 0; + PersistentHistogramData* data = +- memory_allocator_->GetAsObject(ref); +- const size_t length = memory_allocator_->GetAllocSize(ref); ++ memory_allocator_->GetAsObject(ref, &length); + + // Check that metadata is reasonable: name is null-terminated and non-empty, + // ID fields have been loaded with a hash of the name (0 is considered +@@ -345,7 +345,7 @@ std::unique_ptr PersistentHistogramAllocator::GetHistogram( + if (!data || data->name[0] == '\0' || + reinterpret_cast(data)[length - 1] != '\0' || + data->samples_metadata.id == 0 || data->logged_metadata.id == 0 || +- // Note: Sparse histograms use |id + 1| in |logged_metadata|. ++ // Note: Sparse histograms use `id + 1` in `logged_metadata`. + (data->logged_metadata.id != data->samples_metadata.id && + data->logged_metadata.id != data->samples_metadata.id + 1) || + // Most non-matching values happen due to truncated names. Ideally, we +@@ -388,7 +388,7 @@ std::unique_ptr PersistentHistogramAllocator::AllocateHistogram( + histogram_data->histogram_type = histogram_type; + histogram_data->flags = flags | HistogramBase::kIsPersistent; + +- // |counts_ref| relies on being zero'd out initially. Even though this ++ // `counts_ref` relies on being zero'd out initially. Even though this + // should always be the case, manually zero it out again here in case there + // was memory corruption (e.g. if the memory was mapped from a corrupted + // spare file). 
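The GetAsObject()/GetAsArray() changes in this hunk show the pattern the commit message describes: instead of validating an allocation with one call and fetching its size with a separate GetAllocSize() call, the size comes back from the same lookup that validated it. A generic illustration of that TOCTOU distinction follows; it is not the PersistentMemoryAllocator API itself, just a sketch of the racy and non-racy shapes.

#include <atomic>
#include <cstddef>
#include <cstdint>

// A block header living in shared memory; another process can rewrite `size`
// at any time.
struct Block {
  std::atomic<uint32_t> size;
};

// Racy pattern: the size checked here...
bool IsBigEnough(const Block& b, size_t needed) {
  return b.size.load(std::memory_order_acquire) >= needed;
}
// ...is re-read here, after it may have changed out from under the caller.
size_t FetchSize(const Block& b) {
  return b.size.load(std::memory_order_acquire);
}

// Safer shape: a single lookup both validates the size and hands back the
// exact value it validated, so the caller never re-fetches it.
const Block* GetIfBigEnough(const Block& b, size_t needed, size_t* out_size) {
  const uint32_t size = b.size.load(std::memory_order_acquire);
  if (size < needed) {
    return nullptr;
  }
  if (out_size) {
    *out_size = size;  // the caller consumes the value that was checked
  }
  return &b;
}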
+@@ -402,7 +402,7 @@ std::unique_ptr PersistentHistogramAllocator::AllocateHistogram( + size_t bucket_count = bucket_ranges->bucket_count(); + size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count); + if (counts_bytes == 0) { +- // |bucket_count| was out-of-range. ++ // `bucket_count` was out-of-range. + return nullptr; + } + +@@ -410,8 +410,8 @@ std::unique_ptr PersistentHistogramAllocator::AllocateHistogram( + // objects for re-use, it would be dangerous for one to hold a reference + // from a persistent allocator that is not the global one (which is + // permanent once set). If this stops being the case, this check can +- // become an "if" condition beside "!ranges_ref" below and before +- // set_persistent_reference() farther down. ++ // become an `if` condition beside `!ranges_ref` below and before ++ // `set_persistent_reference()` farther down. + DCHECK_EQ(this, GlobalHistogramAllocator::Get()); + + // Re-use an existing BucketRanges persistent allocation if one is known; +@@ -448,7 +448,7 @@ std::unique_ptr PersistentHistogramAllocator::AllocateHistogram( + if (ranges_ref && histogram_data) { + histogram_data->minimum = minimum; + histogram_data->maximum = maximum; +- // |bucket_count| must fit within 32-bits or the allocation of the counts ++ // `bucket_count` must fit within 32-bits or the allocation of the counts + // array would have failed for being too large; the allocator supports + // less than 4GB total size. + histogram_data->bucket_count = static_cast(bucket_count); +@@ -461,7 +461,7 @@ std::unique_ptr PersistentHistogramAllocator::AllocateHistogram( + + if (histogram_data) { + // Create the histogram using resources in persistent memory. This ends up +- // resolving the "ref" values stored in histogram_data instad of just ++ // resolving the `ref` values stored in histogram_data instead of just + // using what is already known above but avoids duplicating the switch + // statement here and serves as a double-check that everything is + // correct before commiting the new histogram to persistent space. 
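Both AllocateHistogram() and CreateHistogram() in this patch bail out when CalculateRequiredCountsBytes() returns zero. A compilable sketch of that guard is below, simplified from the code quoted in the hunk above, with uint32_t standing in for HistogramBase::AtomicCount (an assumption; the real type is Chromium's 32-bit atomic count).

#include <cstddef>
#include <cstdint>
#include <limits>

// Bytes needed to store two counters (active + logged) per bucket, or zero if
// the multiplication would overflow size_t, e.g. for a hostile bucket count.
size_t CalculateRequiredCountsBytesSketch(size_t bucket_count) {
  const size_t kBytesPerBucket = 2 * sizeof(uint32_t);
  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket) {
    return 0;  // signal the out-of-range count to the caller
  }
  return bucket_count * kBytesPerBucket;
}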
+@@ -600,17 +600,16 @@ std::unique_ptr PersistentHistogramAllocator::CreateHistogram( + uint32_t histogram_ranges_ref = histogram_data_ptr->ranges_ref; + uint32_t histogram_ranges_checksum = histogram_data_ptr->ranges_checksum; + ++ size_t allocated_bytes = 0; + HistogramBase::Sample* ranges_data = + memory_allocator_->GetAsArray( + histogram_ranges_ref, kTypeIdRangesArray, +- PersistentMemoryAllocator::kSizeAny); ++ PersistentMemoryAllocator::kSizeAny, &allocated_bytes); + + const uint32_t max_buckets = + std::numeric_limits::max() / sizeof(HistogramBase::Sample); + size_t required_bytes = + (histogram_bucket_count + 1) * sizeof(HistogramBase::Sample); +- size_t allocated_bytes = +- memory_allocator_->GetAllocSize(histogram_ranges_ref); + if (!ranges_data || histogram_bucket_count < 2 || + histogram_bucket_count >= max_buckets || + allocated_bytes < required_bytes) { +@@ -638,11 +637,14 @@ std::unique_ptr PersistentHistogramAllocator::CreateHistogram( + } + + size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count); ++ if (counts_bytes == 0) { ++ return nullptr; ++ } ++ + PersistentMemoryAllocator::Reference counts_ref = + histogram_data_ptr->counts_ref.load(std::memory_order_acquire); +- if (counts_bytes == 0 || +- (counts_ref != 0 && +- memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) { ++ if (counts_ref != 0 && !memory_allocator_->GetAsArray( ++ counts_ref, kTypeIdCountsArray, counts_bytes)) { + return nullptr; + } + +@@ -970,7 +972,7 @@ void GlobalHistogramAllocator::Set(GlobalHistogramAllocator* allocator) { + // histogram allocator was initialized. + // + // TODO(crbug.com/40945497): CHECK(histogram_count == 0) and remove emit of +- // early histogram count once |histogram_count| is reliably zero (0) for all ++ // early histogram count once `histogram_count` is reliably zero (0) for all + // process types. + size_t histogram_count = StatisticsRecorder::GetHistogramCount(); + if (histogram_count != 0) { +diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h +index d5d4e03fa56aa60d7c5799db4cd7fc0854ceae4d..bbb11d6f57f348dc601e5dbfbcc6a26ebd0a877a 100644 +--- a/base/metrics/persistent_histogram_allocator.h ++++ b/base/metrics/persistent_histogram_allocator.h +@@ -47,8 +47,8 @@ class BASE_EXPORT PersistentSparseHistogramDataManager { + ~PersistentSparseHistogramDataManager(); + + // Returns an object that manages persistent-sample-map records for a given +- // |id|. The returned object queries |this| for records. Hence, the returned +- // object must not outlive |this|. ++ // `id`. The returned object queries `this` for records. Hence, the returned ++ // object must not outlive `this`. + std::unique_ptr CreateSampleMapRecords( + uint64_t id); + +@@ -71,19 +71,19 @@ class BASE_EXPORT PersistentSparseHistogramDataManager { + std::vector* GetSampleMapRecordsWhileLocked(uint64_t id) + EXCLUSIVE_LOCKS_REQUIRED(lock_); + +- // Returns sample-map records belonging to the specified |sample_map_records|. +- // Only records found that were not yet seen by |sample_map_records| will be +- // returned, determined by its |seen_| field. Records found for other ++ // Returns sample-map records belonging to the specified `sample_map_records`. ++ // Only records found that were not yet seen by `sample_map_records` will be ++ // returned, determined by its `seen_` field. Records found for other + // sample-maps are held for later use without having to iterate again. 
This + // should be called only from a PersistentSampleMapRecords object because + // those objects have a contract that there are no other threads accessing the +- // internal records_ field of the object that is passed in. If |until_value| ++ // internal records_ field of the object that is passed in. If `until_value` + // is set and a sample is found with said value, the search will stop early + // and the last entry in the returned vector will be that sample. + // Note: The returned vector is not guaranteed to contain all unseen records +- // for |sample_map_records|. If this is needed, then repeatedly call this ++ // for `sample_map_records`. If this is needed, then repeatedly call this + // until an empty vector is returned, which definitely means that +- // |sample_map_records| has seen all its records. ++ // `sample_map_records` has seen all its records. + std::vector LoadRecords( + PersistentSampleMapRecords* sample_map_records, + std::optional until_value); +@@ -112,7 +112,7 @@ class BASE_EXPORT PersistentSampleMapRecords { + // Constructs an instance of this class. The manager object must live longer + // than all instances of this class that reference it, which is not usually + // a problem since these objects are generally managed from within that +- // manager instance. The same caveats apply for for the |records| vector. ++ // manager instance. The same caveats apply for for the `records` vector. + PersistentSampleMapRecords( + PersistentSparseHistogramDataManager* data_manager, + uint64_t sample_map_id, +@@ -125,18 +125,18 @@ class BASE_EXPORT PersistentSampleMapRecords { + + ~PersistentSampleMapRecords(); + +- // Gets next references to persistent sample-map records. If |until_value| is ++ // Gets next references to persistent sample-map records. If `until_value` is + // passed, and said value is found, then it will be the last element in the + // returned vector. The type and layout of the data being referenced is + // defined entirely within the PersistentSampleMap class. + // Note: The returned vector is not guaranteed to contain all unseen records +- // for |this|. If this is needed, then repeatedly call this until an empty +- // vector is returned, which definitely means that |this| has seen all its ++ // for `this`. If this is needed, then repeatedly call this until an empty ++ // vector is returned, which definitely means that `this` has seen all its + // records. + std::vector GetNextRecords( + std::optional until_value); + +- // Creates a new persistent sample-map record for sample |value| and returns ++ // Creates a new persistent sample-map record for sample `value` and returns + // a reference to it. + PersistentMemoryAllocator::Reference CreateNew(HistogramBase::Sample value); + +@@ -160,7 +160,7 @@ class BASE_EXPORT PersistentSampleMapRecords { + // ID of PersistentSampleMap to which these records apply. + const uint64_t sample_map_id_; + +- // This is the count of how many "records" have already been read by |this|. ++ // This is the count of how many "records" have already been read by `this`. + size_t seen_ = 0; + + // This is the set of records found during iteration through memory, owned by +@@ -185,7 +185,7 @@ class BASE_EXPORT PersistentHistogramAllocator { + // See PersistentMemoryAllocator::Iterator for more information. + class BASE_EXPORT Iterator { + public: +- // Constructs an iterator on a given |allocator|, starting at the beginning. ++ // Constructs an iterator on a given `allocator`, starting at the beginning. 
+ // The allocator must live beyond the lifetime of the iterator. + explicit Iterator(PersistentHistogramAllocator* allocator); + +@@ -198,7 +198,7 @@ class BASE_EXPORT PersistentHistogramAllocator { + std::unique_ptr GetNext() { return GetNextWithIgnore(0); } + + // Gets the next histogram from persistent memory, ignoring one particular +- // reference in the process. Pass |ignore| of zero (0) to ignore nothing. ++ // reference in the process. Pass `ignore` of zero (0) to ignore nothing. + std::unique_ptr GetNextWithIgnore(Reference ignore); + + private: +@@ -239,7 +239,7 @@ class BASE_EXPORT PersistentHistogramAllocator { + + // Recreate a Histogram from data held in persistent memory. Though this + // object will be local to the current process, the sample data will be +- // shared with all other threads referencing it. This method takes a |ref| ++ // shared with all other threads referencing it. This method takes a `ref` + // to where the top-level histogram data may be found in this allocator. + // This method will return null if any problem is detected with the data. + std::unique_ptr GetHistogram(Reference ref); +@@ -256,7 +256,7 @@ class BASE_EXPORT PersistentHistogramAllocator { + Reference* ref_ptr); + + // Finalize the creation of the histogram, making it available to other +- // processes if |registered| (as in: added to the StatisticsRecorder) is ++ // processes if `registered` (as in: added to the StatisticsRecorder) is + // True, forgetting it otherwise. + void FinalizeHistogram(Reference ref, bool registered); + +@@ -274,36 +274,36 @@ class BASE_EXPORT PersistentHistogramAllocator { + const HistogramBase* histogram); + + // Returns an object that manages persistent-sample-map records for a given +- // |id|. The returned object queries |sparse_histogram_data_manager_| for ++ // `id`. The returned object queries `sparse_histogram_data_manager_` for + // records. Hence, the returned object must not outlive +- // |sparse_histogram_data_manager_| (and hence |this|). ++ // `sparse_histogram_data_manager_` (and hence `this`). + std::unique_ptr CreateSampleMapRecords( + uint64_t id); + + // Creates internal histograms for tracking memory use and allocation sizes +- // for allocator of |name| (which can simply be the result of Name()). This +- // is done seperately from construction for situations such as when the ++ // for allocator of `name` (which can simply be the result of Name()). This ++ // is done separately from construction for situations such as when the + // histograms will be backed by memory provided by this very allocator. + // + // IMPORTANT: tools/metrics/histograms/metadata/uma/histograms.xml must +- // be updated with the following histograms for each |name| param: ++ // be updated with the following histograms for each `name` param: + // UMA.PersistentAllocator.name.Errors + // UMA.PersistentAllocator.name.UsedPct + void CreateTrackingHistograms(std::string_view name); + void UpdateTrackingHistograms(); + +- // Sets the internal |ranges_manager_|, which will be used by the allocator to +- // register BucketRanges. Takes ownership of the passed |ranges_manager|. ++ // Sets the internal `ranges_manager_`, which will be used by the allocator to ++ // register BucketRanges. Takes ownership of the passed `ranges_manager`. 
+ // +- // WARNING: Since histograms may be created from |this| from multiple threads, ++ // WARNING: Since histograms may be created from `this` from multiple threads, + // for example through a direct call to CreateHistogram(), or while iterating +- // through |this|, then the passed manager may also be accessed concurrently. ++ // through `this`, then the passed manager may also be accessed concurrently. + // Hence, care must be taken to ensure that either: + // 1) The passed manager is threadsafe (see ThreadSafeRangesManager), or +- // 2) |this| is not used concurrently. ++ // 2) `this` is not used concurrently. + void SetRangesManager(RangesManager* ranges_manager); + +- // Clears the internal |last_created_| reference so testing can validate ++ // Clears the internal `last_created_` reference so testing can validate + // operation without that optimization. + void ClearLastCreatedReferenceForTesting(); + +@@ -329,7 +329,7 @@ class BASE_EXPORT PersistentHistogramAllocator { + PersistentHistogramData* histogram_data_ptr); + + // Gets or creates an object in the global StatisticsRecorder matching +- // the |histogram| passed. Null is returned if one was not found and ++ // the `histogram` passed. Null is returned if one was not found and + // one could not be created. + HistogramBase* GetOrCreateStatisticsRecorderHistogram( + const HistogramBase* histogram); +@@ -365,7 +365,7 @@ class BASE_EXPORT GlobalHistogramAllocator + + ~GlobalHistogramAllocator() override; + +- // Create a global allocator using the passed-in memory |base|, |size|, and ++ // Create a global allocator using the passed-in memory `base`, `size`, and + // other parameters. Ownership of the memory segment remains with the caller. + static void CreateWithPersistentMemory(void* base, + size_t size, +@@ -374,17 +374,17 @@ class BASE_EXPORT GlobalHistogramAllocator + std::string_view name); + + // Create a global allocator using an internal block of memory of the +- // specified |size| taken from the heap. ++ // specified `size` taken from the heap. + static void CreateWithLocalMemory(size_t size, + uint64_t id, + std::string_view name); + + #if !BUILDFLAG(IS_NACL) +- // Create a global allocator by memory-mapping a |file|. If the file does +- // not exist, it will be created with the specified |size|. If the file does ++ // Create a global allocator by memory-mapping a `file`. If the file does ++ // not exist, it will be created with the specified `size`. If the file does + // exist, the allocator will use and add to its contents, ignoring the passed + // size in favor of the existing size. Returns whether the global allocator +- // was set. If |exclusive_write| is true, the file will be opened in a mode ++ // was set. If `exclusive_write` is true, the file will be opened in a mode + // that disallows multiple concurrent writers (no effect on non-Windows). + static bool CreateWithFile(const FilePath& file_path, + size_t size, +@@ -392,9 +392,9 @@ class BASE_EXPORT GlobalHistogramAllocator + std::string_view name, + bool exclusive_write = false); + +- // Creates a new file at |active_path|. If it already exists, it will first be +- // moved to |base_path|. In all cases, any old file at |base_path| will be +- // removed. If |spare_path| is non-empty and exists, that will be renamed and ++ // Creates a new file at `active_path`. If it already exists, it will first be ++ // moved to `base_path`. In all cases, any old file at `base_path` will be ++ // removed. 
If `spare_path` is non-empty and exists, that will be renamed and + // used as the active file. Otherwise, the file will be created using the + // given size, id, and name. Returns whether the global allocator was set. + static bool CreateWithActiveFile(const FilePath& base_path, +@@ -405,9 +405,9 @@ class BASE_EXPORT GlobalHistogramAllocator + std::string_view name); + + // Uses ConstructBaseActivePairFilePaths() to build a pair of file names which +- // are then used for CreateWithActiveFile(). |name| is used for both the ++ // are then used for CreateWithActiveFile(). `name` is used for both the + // internal name for the allocator and also for the name of the file inside +- // |dir|. ++ // `dir`. + static bool CreateWithActiveFileInDir(const FilePath& dir, + size_t size, + uint64_t id, +@@ -442,7 +442,7 @@ class BASE_EXPORT GlobalHistogramAllocator + #endif + + // Create a global allocator using a block of shared memory accessed +- // through the given |region|. The allocator maps the shared memory into ++ // through the given `region`. The allocator maps the shared memory into + // current process's virtual address space and frees it upon destruction. + // The memory will continue to live if other processes have access to it. + static void CreateWithSharedMemoryRegion( +@@ -481,7 +481,7 @@ class BASE_EXPORT GlobalHistogramAllocator + bool HasPersistentLocation() const; + + // Moves the file being used to persist this allocator's data to the directory +- // specified by |dir|. Returns whether the operation was successful. ++ // specified by `dir`. Returns whether the operation was successful. + bool MovePersistentFile(const FilePath& dir); + + // Writes the internal data to a previously set location. This is generally +diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc +index d4ccde87f333dfb1754e89b9651351fcc47de917..46e239dff185262ea7bc8b89baaba9dc5eb0435e 100644 +--- a/base/metrics/persistent_memory_allocator.cc ++++ b/base/metrics/persistent_memory_allocator.cc +@@ -59,7 +59,7 @@ constexpr uint32_t kGlobalCookie = 0x408305DC; + // the metadata, the version number can be queried to operate in a backward- + // compatible manner until the memory segment is completely re-initalized. + // Note: If you update the metadata in a non-backwards compatible way, reset +-// |kCompatibleVersions|. Otherwise, add the previous version. ++// `kCompatibleVersions`. Otherwise, add the previous version. + constexpr uint32_t kGlobalVersion = 3; + static constexpr uint32_t kOldCompatibleVersions[] = {2}; + +@@ -146,12 +146,12 @@ struct PersistentMemoryAllocator::SharedMetadata { + + // The "iterable" queue is an M&S Queue as described here, append-only: + // https://www.research.ibm.com/people/m/michael/podc-1996.pdf +- // |queue| needs to be 64-bit aligned and is itself a multiple of 64 bits. ++ // `queue` needs to be 64-bit aligned and is itself a multiple of 64 bits. + volatile std::atomic tailptr; // Last block of iteration queue. + volatile BlockHeader queue; // Empty block for linked-list head/tail. + }; + +-// The "queue" block header is used to detect "last node" so that zero/null ++// The `queue` block header is used to detect the "last node" so that zero/null + // can be used to indicate that it hasn't been added at all. It is part of + // the SharedMetadata structure which itself is always located at offset zero. 
+ const PersistentMemoryAllocator::Reference +@@ -208,7 +208,8 @@ PersistentMemoryAllocator::Iterator::GetLast() { + } + + PersistentMemoryAllocator::Reference +-PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) { ++PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return, ++ size_t* alloc_size) { + // Make a copy of the existing count of found-records, acquiring all changes + // made to the allocator, notably "freeptr" (see comment in loop for why + // the load of that value cannot be moved above here) that occurred during +@@ -219,12 +220,13 @@ PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) { + // "count" was fetched _after_ "freeptr" then it would be possible for + // this thread to be interrupted between them and other threads perform + // multiple allocations, make-iterables, and iterations (with the included +- // increment of |record_count_|) culminating in the check at the bottom ++ // increment of `record_count_`) culminating in the check at the bottom + // mistakenly determining that a loop exists. Isn't this stuff fun? + uint32_t count = record_count_.load(std::memory_order_acquire); + + Reference last = last_record_.load(std::memory_order_acquire); +- Reference next; ++ Reference next = 0; ++ size_t next_size = 0; + while (true) { + const volatile BlockHeader* block = + allocator_->GetBlock(last, 0, 0, true, false); +@@ -245,7 +247,7 @@ PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) { + next = block->next.load(std::memory_order_acquire); + if (next == kReferenceQueue) // No next allocation in queue. + return kReferenceNull; +- block = allocator_->GetBlock(next, 0, 0, false, false); ++ block = allocator_->GetBlock(next, 0, 0, false, false, &next_size); + if (!block) { // Memory is corrupt. + allocator_->SetCorrupt(); + return kReferenceNull; +@@ -286,21 +288,29 @@ PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) { + // It does not matter if it falls behind temporarily so long as it never + // gets ahead. 
+ record_count_.fetch_add(1, std::memory_order_release); ++ if (alloc_size) { ++ *alloc_size = next_size; ++ } + return next; + } + + PersistentMemoryAllocator::Reference +-PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match) { ++PersistentMemoryAllocator::Iterator::GetNextOfType(uint32_t type_match, ++ size_t* alloc_size) { + Reference ref; ++ size_t size; + uint32_t type_found; +- while ((ref = GetNext(&type_found)) != 0) { +- if (type_found == type_match) ++ while ((ref = GetNext(&type_found, &size)) != 0) { ++ if (type_found == type_match) { ++ if (alloc_size) { ++ *alloc_size = size; ++ } + return ref; ++ } + } + return kReferenceNull; + } + +- + // static + bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base, + size_t size, +@@ -475,12 +485,12 @@ uint64_t PersistentMemoryAllocator::Id() const { + + const char* PersistentMemoryAllocator::Name() const { + Reference name_ref = shared_meta()->name; +- const char* name_cstr = +- GetAsArray(name_ref, 0, PersistentMemoryAllocator::kSizeAny); ++ size_t name_length = 0; ++ const char* name_cstr = GetAsArray( ++ name_ref, 0, PersistentMemoryAllocator::kSizeAny, &name_length); + if (!name_cstr) + return ""; + +- size_t name_length = GetAllocSize(name_ref); + if (name_cstr[name_length - 1] != '\0') { + NOTREACHED_IN_MIGRATION(); + SetCorrupt(); +@@ -555,23 +565,6 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference( + return ref; + } + +-size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const { +- const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); +- if (!block) +- return 0; +- uint32_t size = block->size; +- // Header was verified by GetBlock() but a malicious actor could change +- // the value between there and here. Check it again. +- uint32_t total_size; +- if (size <= sizeof(BlockHeader) || +- !base::CheckAdd(ref, size).AssignIfValid(&total_size) || +- total_size > mem_size_) { +- SetCorrupt(); +- return 0; +- } +- return size - sizeof(BlockHeader); +-} +- + uint32_t PersistentMemoryAllocator::GetType(Reference ref) const { + const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false); + if (!block) +@@ -641,8 +634,9 @@ bool PersistentMemoryAllocator::ChangeType(Reference ref, + + PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate( + size_t req_size, +- uint32_t type_id) { +- Reference ref = AllocateImpl(req_size, type_id); ++ uint32_t type_id, ++ size_t* alloc_size) { ++ Reference ref = AllocateImpl(req_size, type_id, alloc_size); + if (ref) { + // Success: Record this allocation in usage stats (if active). + if (allocs_histogram_) +@@ -657,7 +651,8 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate( + + PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl( + size_t req_size, +- uint32_t type_id) { ++ uint32_t type_id, ++ size_t* alloc_size) { + DCHECK_NE(access_mode_, kReadOnly); + + // Validate req_size to ensure it won't overflow when used as 32-bit value. +@@ -820,6 +815,11 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl( + block->size = static_cast(size); + block->cookie = kBlockCookieAllocated; + block->type_id.store(type_id, std::memory_order_relaxed); ++ ++ // Return the allocation size if requested. 
++ if (alloc_size) { ++ *alloc_size = size - sizeof(BlockHeader); ++ } + return freeptr; + } + } +@@ -932,17 +932,16 @@ bool PersistentMemoryAllocator::IsFull() const { + return CheckFlag(&shared_meta()->flags, kFlagFull); + } + +-// Dereference a block |ref| and ensure that it's valid for the desired +-// |type_id| and |size|. |special| indicates that we may try to access block +-// headers not available to callers but still accessed by this module. By +-// having internal dereferences go through this same function, the allocator +-// is hardened against corruption. + const volatile PersistentMemoryAllocator::BlockHeader* + PersistentMemoryAllocator::GetBlock(Reference ref, + uint32_t type_id, + size_t size, + bool queue_ok, +- bool free_ok) const { ++ bool free_ok, ++ size_t* alloc_size) const { ++ // The caller cannot request `alloc_size` if `queue_ok` or `free_ok`. ++ CHECK(!(alloc_size && (queue_ok || free_ok))); ++ + // Handle special cases. + if (ref == kReferenceQueue && queue_ok) + return reinterpret_cast(mem_base_ + ref); +@@ -961,29 +960,39 @@ PersistentMemoryAllocator::GetBlock(Reference ref, + return nullptr; + } + ++ const volatile BlockHeader* const block = ++ reinterpret_cast(mem_base_ + ref); ++ + // Validation of referenced block-header. + if (!free_ok) { +- const volatile BlockHeader* const block = +- reinterpret_cast(mem_base_ + ref); + if (block->cookie != kBlockCookieAllocated) + return nullptr; +- if (block->size < size) ++ const uint32_t block_size = block->size; ++ if (block_size < size) { + return nullptr; +- uint32_t block_size; +- if (!base::CheckAdd(ref, block->size).AssignIfValid(&block_size)) { ++ } ++ // Find a validate the end of the block. ++ uint32_t block_end_ref; ++ if (!base::CheckAdd(ref, block_size).AssignIfValid(&block_end_ref)) { + return nullptr; + } +- if (block_size > mem_size_) { ++ if (block_end_ref > mem_size_) { ++ // The end of the alloc extends beyond the allocator's bounds. ++ SetCorrupt(); + return nullptr; + } + if (type_id != 0 && + block->type_id.load(std::memory_order_relaxed) != type_id) { + return nullptr; + } ++ // Return `alloc_size` if requested by the caller. ++ if (alloc_size) { ++ *alloc_size = block_size - sizeof(BlockHeader); ++ } + } + + // Return pointer to block data. +- return reinterpret_cast(mem_base_ + ref); ++ return block; + } + + void PersistentMemoryAllocator::FlushPartial(size_t length, bool sync) { +@@ -1009,10 +1018,11 @@ uint32_t PersistentMemoryAllocator::version() const { + const volatile void* PersistentMemoryAllocator::GetBlockData( + Reference ref, + uint32_t type_id, +- size_t size) const { ++ size_t size, ++ size_t* alloc_size) const { + DCHECK(size > 0); + const volatile BlockHeader* block = +- GetBlock(ref, type_id, size, false, false); ++ GetBlock(ref, type_id, size, false, false, alloc_size); + if (!block) + return nullptr; + return reinterpret_cast(block) + sizeof(BlockHeader); +@@ -1191,14 +1201,14 @@ void FilePersistentMemoryAllocator::Cache() { + base::BlockingType::MAY_BLOCK); + + // Calculate begin/end addresses so that the first byte of every page +- // in that range can be read. Keep within the used space. The |volatile| ++ // in that range can be read. Keep within the used space. The `volatile` + // keyword makes it so the compiler can't make assumptions about what is + // in a given memory location and thus possibly avoid the read. 
+ const volatile char* mem_end = mem_base_ + used(); + const volatile char* mem_begin = mem_base_; + + // Iterate over the memory a page at a time, reading the first byte of +- // every page. The values are added to a |total| so that the compiler ++ // every page. The values are added to a `total` so that the compiler + // can't omit the read. + int total = 0; + for (const volatile char* memory = mem_begin; memory < mem_end; +@@ -1206,7 +1216,7 @@ void FilePersistentMemoryAllocator::Cache() { + total += *memory; + } + +- // Tell the compiler that |total| is used so that it can't optimize away ++ // Tell the compiler that `total` is used so that it can't optimize away + // the memory accesses above. + debug::Alias(&total); + } +@@ -1276,7 +1286,8 @@ span DelayedPersistentAllocation::GetUntyped() const { + #endif // !BUILDFLAG(IS_NACL) + + if (!ref) { +- ref = allocator_->Allocate(size_, type_); ++ [[maybe_unused]] size_t alloc_size = 0; ++ ref = allocator_->Allocate(size_, type_, &alloc_size); + if (!ref) { + return span(); + } +@@ -1292,7 +1303,7 @@ span DelayedPersistentAllocation::GetUntyped() const { + // allocation, and stored its reference. Purge the allocation that was + // just done and use the other one instead. + DCHECK_EQ(type_, allocator_->GetType(existing)); +- DCHECK_LE(size_, allocator_->GetAllocSize(existing)); ++ DCHECK_LE(size_, alloc_size); + allocator_->ChangeType(ref, 0, type_, /*clear=*/false); + ref = existing; + #if !BUILDFLAG(IS_NACL) +@@ -1328,13 +1339,13 @@ span DelayedPersistentAllocation::GetUntyped() const { + SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "size_", size_); + if (ref == 0xC8799269) { + // There are many crash reports containing the corrupted "0xC8799269" +- // value in |ref|. This value is actually a "magic" number to indicate ++ // value in `ref`. This value is actually a "magic" number to indicate + // that a certain block in persistent memory was successfully allocated, + // so it should not appear there. Include some extra crash keys to see if + // the surrounding values were also corrupted. If so, the value before + // would be the size of the allocated object, and the value after would be + // the type id of the allocated object. If they are not corrupted, these +- // would contain |ranges_checksum| and the start of |samples_metadata| ++ // would contain `ranges_checksum` and the start of `samples_metadata` + // respectively (see PersistentHistogramData struct). We do some pointer + // arithmetic here -- it should theoretically be safe, unless something + // went terribly wrong... +diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h +index b6977d6cfb0e97d615daeee15e0b34e417886e91..099444e86f67d07ad3274737671b37bb03008648 100644 +--- a/base/metrics/persistent_memory_allocator.h ++++ b/base/metrics/persistent_memory_allocator.h +@@ -171,13 +171,13 @@ class BASE_EXPORT PersistentMemoryAllocator { + // eventually quit. + class BASE_EXPORT Iterator { + public: +- // Constructs an iterator on a given |allocator|, starting at the beginning. ++ // Constructs an iterator on a given `allocator`, starting at the beginning. + // The allocator must live beyond the lifetime of the iterator. This class + // has read-only access to the allocator (hence "const") but the returned + // references can be used on a read/write version, too. + explicit Iterator(const PersistentMemoryAllocator* allocator); + +- // As above but resuming from the |starting_after| reference. 
The first call ++ // As above but resuming from the `starting_after` reference. The first call + // to GetNext() will return the next object found after that reference. The + // reference must be to an "iterable" object; references to non-iterable + // objects (those that never had MakeIterable() called for them) will cause +@@ -193,7 +193,7 @@ class BASE_EXPORT PersistentMemoryAllocator { + // Resets the iterator back to the beginning. + void Reset(); + +- // Resets the iterator, resuming from the |starting_after| reference. ++ // Resets the iterator, resuming from the `starting_after` reference. + void Reset(Reference starting_after); + + // Returns the previously retrieved reference, or kReferenceNull if none. +@@ -201,17 +201,17 @@ class BASE_EXPORT PersistentMemoryAllocator { + // that value. + Reference GetLast(); + +- // Gets the next iterable, storing that type in |type_return|. The actual ++ // Gets the next iterable, storing that type in `type_return`. The actual + // return value is a reference to the allocation inside the allocator or + // zero if there are no more. GetNext() may still be called again at a + // later time to retrieve any new allocations that have been added. +- Reference GetNext(uint32_t* type_return); ++ Reference GetNext(uint32_t* type_return, size_t* alloc_size = nullptr); + +- // Similar to above but gets the next iterable of a specific |type_match|. ++ // Similar to above but gets the next iterable of a specific `type_match`. + // This should not be mixed with calls to GetNext() because any allocations + // skipped here due to a type mis-match will never be returned by later + // calls to GetNext() meaning it's possible to completely miss entries. +- Reference GetNextOfType(uint32_t type_match); ++ Reference GetNextOfType(uint32_t type_match, size_t* alloc_size = nullptr); + + // As above but works using object type. + template +@@ -244,8 +244,8 @@ class BASE_EXPORT PersistentMemoryAllocator { + } + + // Convert a generic pointer back into a reference. A null reference will +- // be returned if |memory| is not inside the persistent segment or does not +- // point to an object of the specified |type_id|. ++ // be returned if `memory` is not inside the persistent segment or does not ++ // point to an object of the specified `type_id`. + Reference GetAsReference(const void* memory, uint32_t type_id) const { + return allocator_->GetAsReference(memory, type_id); + } +@@ -308,12 +308,12 @@ class BASE_EXPORT PersistentMemoryAllocator { + // The allocator operates on any arbitrary block of memory. Creation and + // persisting or sharing of that block with another process is the + // responsibility of the caller. The allocator needs to know only the +- // block's |base| address, the total |size| of the block, and any internal +- // |page| size (zero if not paged) across which allocations should not span. +- // The |id| is an arbitrary value the caller can use to identify a ++ // block's `base` address, the total `size` of the block, and any internal ++ // `page` size (zero if not paged) across which allocations should not span. ++ // The `id` is an arbitrary value the caller can use to identify a + // particular memory segment. It will only be loaded during the initial + // creation of the segment and can be checked by the caller for consistency. +- // The |name|, if provided, is used to distinguish histograms for this ++ // The `name`, if provided, is used to distinguish histograms for this + // allocator. 
Only the primary owner of the segment should define this value; + // other processes can learn it from the shared state. If the access mode + // is kReadOnly then no changes will be made to it. The resulting object +@@ -367,12 +367,12 @@ class BASE_EXPORT PersistentMemoryAllocator { + uint8_t GetMemoryState() const; + + // Create internal histograms for tracking memory use and allocation sizes +- // for allocator of |name| (which can simply be the result of Name()). This +- // is done seperately from construction for situations such as when the ++ // for allocator of `name` (which can simply be the result of Name()). This ++ // is done separately from construction for situations such as when the + // histograms will be backed by memory provided by this very allocator. + // + // IMPORTANT: tools/metrics/histograms/metadata/uma/histograms.xml must +- // be updated with the following histograms for each |name| param: ++ // be updated with the following histograms for each `name` param: + // UMA.PersistentAllocator.name.Errors + // UMA.PersistentAllocator.name.UsedPct + void CreateTrackingHistograms(std::string_view name); +@@ -382,13 +382,13 @@ class BASE_EXPORT PersistentMemoryAllocator { + // OS that all the data should be sent to the disk immediately. This is + // useful in the rare case where something has just been stored that needs + // to survive a hard shutdown of the machine like from a power failure. +- // The |sync| parameter indicates if this call should block until the flush ++ // The `sync` parameter indicates if this call should block until the flush + // is complete but is only advisory and may or may not have an effect + // depending on the capabilities of the OS. Synchronous flushes are allowed +- // only from threads that are allowed to do I/O but since |sync| is only ++ // only from threads that are allowed to do I/O but since `sync` is only + // advisory, all flushes should be done on IO-capable threads. +- // TODO: Since |sync| is ignored on Windows, consider making it re-post on a +- // background thread with |sync| set to true so that |sync| is not just ++ // TODO: Since `sync` is ignored on Windows, consider making it re-post on a ++ // background thread with `sync` set to true so that `sync` is not just + // advisory. + void Flush(bool sync); + +@@ -400,9 +400,9 @@ class BASE_EXPORT PersistentMemoryAllocator { + size_t size() const { return mem_size_; } + size_t used() const; + +- // Get an object referenced by a |ref|. For safety reasons, the |type_id| +- // code and size-of(|T|) are compared to ensure the reference is valid +- // and cannot return an object outside of the memory segment. A |type_id| of ++ // Get an object referenced by a `ref`. For safety reasons, the `type_id` ++ // code and size-of(`T`) are compared to ensure the reference is valid ++ // and cannot return an object outside of the memory segment. A `type_id` of + // kTypeIdAny (zero) will match any though the size is still checked. NULL is + // returned if any problem is detected, such as corrupted storage or incorrect + // parameters. Callers MUST check that the returned value is not-null EVERY +@@ -422,7 +422,7 @@ class BASE_EXPORT PersistentMemoryAllocator { + // largest architecture, including at the end. 
+ // + // To protected against mistakes, all objects must have the attribute +- // |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded ++ // `kExpectedInstanceSize` (static constexpr size_t) that is a hard-coded + // numerical value -- NNN, not sizeof(T) -- that can be tested. If the + // instance size is not fixed, at least one build will fail. + // +@@ -442,27 +442,28 @@ class BASE_EXPORT PersistentMemoryAllocator { + // nature of that keyword to the caller. It can add it back, if necessary, + // based on knowledge of how the allocator is being used. + template +- T* GetAsObject(Reference ref) { ++ T* GetAsObject(Reference ref, size_t* alloc_size = nullptr) { + static_assert(std::is_standard_layout_v, "only standard objects"); + static_assert(!std::is_array_v, "use GetAsArray<>()"); + static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size"); + return const_cast(reinterpret_cast( +- GetBlockData(ref, T::kPersistentTypeId, sizeof(T)))); ++ GetBlockData(ref, T::kPersistentTypeId, sizeof(T), alloc_size))); + } + template +- const T* GetAsObject(Reference ref) const { ++ const T* GetAsObject(Reference ref, size_t* alloc_size = nullptr) const { + static_assert(std::is_standard_layout_v, "only standard objects"); + static_assert(!std::is_array_v, "use GetAsArray<>()"); + static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size"); + return const_cast(reinterpret_cast( +- GetBlockData(ref, T::kPersistentTypeId, sizeof(T)))); ++ GetBlockData(ref, T::kPersistentTypeId, sizeof(T), alloc_size))); + } + +- // Like GetAsObject but get an array of simple, fixed-size types. ++ // Like GetAsObject() but get an array of simple, fixed-size types. + // +- // Use a |count| of the required number of array elements, or kSizeAny. +- // GetAllocSize() can be used to calculate the upper bound but isn't reliable +- // because padding can make space for extra elements that were not written. ++ // Use a `count` of the required number of array elements, or kSizeAny. ++ // The, optionally returned, `alloc_size` can be used to calculate the upper ++ // bound but isn't reliable because padding can make space for extra elements ++ // that were not written. + // + // Remember that an array of char is a string but may not be NUL terminated. + // +@@ -470,29 +471,29 @@ class BASE_EXPORT PersistentMemoryAllocator { + // compatibilty when using these accessors. Only use fixed-size types such + // as char, float, double, or (u)intXX_t. + template +- T* GetAsArray(Reference ref, uint32_t type_id, size_t count) { ++ T* GetAsArray(Reference ref, ++ uint32_t type_id, ++ size_t count, ++ size_t* alloc_size = nullptr) { + static_assert(std::is_fundamental_v, "use GetAsObject<>()"); + return const_cast(reinterpret_cast( +- GetBlockData(ref, type_id, count * sizeof(T)))); ++ GetBlockData(ref, type_id, count * sizeof(T), alloc_size))); + } + template +- const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const { ++ const T* GetAsArray(Reference ref, ++ uint32_t type_id, ++ size_t count, ++ size_t* alloc_size = nullptr) const { + static_assert(std::is_fundamental_v, "use GetAsObject<>()"); + return const_cast(reinterpret_cast( +- GetBlockData(ref, type_id, count * sizeof(T)))); ++ GetBlockData(ref, type_id, count * sizeof(T), alloc_size))); + } + + // Get the corresponding reference for an object held in persistent memory. 
+- // If the |memory| is not valid or the type does not match, a kReferenceNull ++ // If the `memory` is not valid or the type does not match, a kReferenceNull + // result will be returned. + Reference GetAsReference(const void* memory, uint32_t type_id) const; + +- // Get the number of bytes allocated to a block. This is useful when storing +- // arrays in order to validate the ending boundary. The returned value will +- // include any padding added to achieve the required alignment and so could +- // be larger than given in the original Allocate() request. +- size_t GetAllocSize(Reference ref) const; +- + // Access the internal "type" of an object. This generally isn't necessary + // but can be used to "clear" the type and so effectively mark it as deleted + // even though the memory stays valid and allocated. Changing the type is +@@ -500,8 +501,8 @@ class BASE_EXPORT PersistentMemoryAllocator { + // It will return false if the existing type is not what is expected. + // + // Changing the type doesn't mean the data is compatible with the new type. +- // Passing true for |clear| will zero the memory after the type has been +- // changed away from |from_type_id| but before it becomes |to_type_id| meaning ++ // Passing true for `clear` will zero the memory after the type has been ++ // changed away from `from_type_id` but before it becomes `to_type_id` meaning + // that it is done in a manner that is thread-safe. Memory is guaranteed to + // be zeroed atomically by machine-word in a monotonically increasing order. + // +@@ -553,13 +554,15 @@ class BASE_EXPORT PersistentMemoryAllocator { + // While the above works much like malloc & free, these next methods provide + // an "object" interface similar to new and delete. + +- // Reserve space in the memory segment of the desired |size| and |type_id|. ++ // Reserve space in the memory segment of the desired `size` and `type_id`. + // + // A return value of zero indicates the allocation failed, otherwise the + // returned reference can be used by any process to get a real pointer via +- // the GetAsObject() or GetAsArray calls. The actual allocated size may be ++ // the GetAsObject() or GetAsArray() calls. The actual allocated size may be + // larger and will always be a multiple of 8 bytes (64 bits). +- Reference Allocate(size_t size, uint32_t type_id); ++ Reference Allocate(size_t size, ++ uint32_t type_id, ++ size_t* alloc_size = nullptr); + + // Allocate and construct an object in persistent memory. The type must have + // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId +@@ -586,7 +589,7 @@ class BASE_EXPORT PersistentMemoryAllocator { + } + + // Similar to New, above, but construct the object out of an existing memory +- // block and of an expected type. If |clear| is true, memory will be zeroed ++ // block and of an expected type. If `clear` is true, memory will be zeroed + // before construction. Though this is not standard object behavior, it + // is present to match with new allocations that always come from zeroed + // memory. Anything previously present simply ceases to exist; no destructor +@@ -596,13 +599,16 @@ class BASE_EXPORT PersistentMemoryAllocator { + // results. USE WITH CARE! + template + T* New(Reference ref, uint32_t from_type_id, bool clear) { +- DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj"; + // Make sure the memory is appropriate. This won't be used until after + // the type is changed but checking first avoids the possibility of having + // to change the type back. 
+- void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
++ size_t alloc_size = 0;
++ void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T), &alloc_size));
+ if (!mem)
+ return nullptr;
++
++ DCHECK_LE(sizeof(T), alloc_size) << "alloc not big enough for obj";
++
+ // Ensure the allocator's internal alignment is sufficient for this object.
+ // This protects against coding errors in the allocator.
+ DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
+@@ -633,7 +639,7 @@ class BASE_EXPORT PersistentMemoryAllocator {
+ // First change the type to "transitioning" so there is no race condition
+ // where another thread could find the object through iteration while it
+ // is been destructed. This will "acquire" the memory so no changes get
+- // reordered before it. It will fail if |ref| is invalid.
++ // reordered before it. It will fail if `ref` is invalid.
+ if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false))
+ return;
+ // Destruct the object.
+@@ -677,7 +683,7 @@ class BASE_EXPORT PersistentMemoryAllocator {
+ };
+
+ // Constructs the allocator. Everything is the same as the public allocator
+- // except |memory| which is a structure with additional information besides
++ // except `memory` which is a structure with additional information besides
+ // the base address.
+ PersistentMemoryAllocator(Memory memory,
+ size_t size,
+@@ -715,32 +721,52 @@ class BASE_EXPORT PersistentMemoryAllocator {
+ }
+
+ // Actual method for doing the allocation.
+- Reference AllocateImpl(size_t size, uint32_t type_id);
++ Reference AllocateImpl(size_t size, uint32_t type_id, size_t* alloc_size);
+
+- // Gets the block header associated with a specific reference.
++ // Dereferences a block `ref` to retrieve a pointer to the block header for
++ // the reference. This method ensures that the referenced block is valid for
++ // the desired `type_id` and `size`. Optionally, if `alloc_size` is not
++ // nullptr, the validated size of the underlying allocation is returned.
++ //
++ // Special cases for internal use only:
++ //
++ // * If `queue_ok` is true and `ref` is kReferenceQueue then the
++ // block header for the allocation queue is returned.
++ //
++ // * If `free_ok` is true then the block header is allowed to point to a
++ // block that may not be in the `allocated` state. This bypasses block
++ // validation.
++ //
++ // Because they bypass block validation, it is not permitted to request the
++ // `alloc_size` when either of `queue_ok` or `free_ok` is true.
+ const volatile BlockHeader* GetBlock(Reference ref,
+ uint32_t type_id,
+ size_t size,
+ bool queue_ok,
+- bool free_ok) const;
++ bool free_ok,
++ size_t* alloc_size = nullptr) const;
+ volatile BlockHeader* GetBlock(Reference ref,
+ uint32_t type_id,
+ size_t size,
+ bool queue_ok,
+- bool free_ok) {
++ bool free_ok,
++ size_t* alloc_size = nullptr) {
+ return const_cast<volatile BlockHeader*>(
+ const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
+- ref, type_id, size, queue_ok, free_ok));
++ ref, type_id, size, queue_ok, free_ok, alloc_size));
+ }
+
+ // Gets the actual data within a block associated with a specific reference.
+ const volatile void* GetBlockData(Reference ref, + uint32_t type_id, +- size_t size) const; +- volatile void* GetBlockData(Reference ref, uint32_t type_id, size_t size) { ++ size_t size, ++ size_t* alloc_size = nullptr) const; ++ volatile void* GetBlockData(Reference ref, ++ uint32_t type_id, ++ size_t size, ++ size_t* alloc_size = nullptr) { + return const_cast( + const_cast(this)->GetBlockData( +- ref, type_id, size)); ++ ref, type_id, size, alloc_size)); + } + + // Records an error in the internal histogram. +@@ -792,12 +818,12 @@ class BASE_EXPORT LocalPersistentMemoryAllocator + ~LocalPersistentMemoryAllocator() override; + + private: +- // Allocates a block of local memory of the specified |size|, ensuring that ++ // Allocates a block of local memory of the specified `size`, ensuring that + // the memory will not be physically allocated until accessed and will read + // as zero when that happens. + static Memory AllocateLocalMemory(size_t size, std::string_view name); + +- // Deallocates a block of local |memory| of the specified |size|. ++ // Deallocates a block of local `memory` of the specified `size`. + static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type); + }; + +@@ -865,8 +891,8 @@ class BASE_EXPORT ReadOnlySharedPersistentMemoryAllocator + class BASE_EXPORT FilePersistentMemoryAllocator + : public PersistentMemoryAllocator { + public: +- // A |max_size| of zero will use the length of the file as the maximum +- // size. The |file| object must have been already created with sufficient ++ // A `max_size` of zero will use the length of the file as the maximum ++ // size. The `file` object must have been already created with sufficient + // permissions (read, read/write, or read/write/extend). + FilePersistentMemoryAllocator(std::unique_ptr file, + size_t max_size, +@@ -916,18 +942,18 @@ class BASE_EXPORT DelayedPersistentAllocation { + public: + using Reference = PersistentMemoryAllocator::Reference; + +- // Creates a delayed allocation using the specified |allocator|. When +- // needed, the memory will be allocated using the specified |type| and +- // |size|. If |offset| is given, the returned pointer will be at that ++ // Creates a delayed allocation using the specified `allocator`. When ++ // needed, the memory will be allocated using the specified `type` and ++ // `size`. If `offset` is given, the returned pointer will be at that + // offset into the segment; this allows combining allocations into a + // single persistent segment to reduce overhead and means an "all or +- // nothing" request. Note that |size| is always the total memory size +- // and |offset| is just indicating the start of a block within it. ++ // nothing" request. Note that `size` is always the total memory size ++ // and `offset` is just indicating the start of a block within it. + // +- // Once allocated, a reference to the segment will be stored at |ref|. ++ // Once allocated, a reference to the segment will be stored at `ref`. + // This shared location must be initialized to zero (0); it is checked + // with every Get() request to see if the allocation has already been +- // done. If reading |ref| outside of this object, be sure to do an ++ // done. If reading `ref` outside of this object, be sure to do an + // "acquire" load. Don't write to it -- leave that to this object. 
+ DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
+ std::atomic<Reference>* ref,
+diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
+index 82a851dc11bd801f8f777a5d3ad0afb6b8de9218..486e854b0dfcf8dc0c188f7cdd2bc0b4b5b0d399 100644
+--- a/base/metrics/persistent_memory_allocator_unittest.cc
++++ b/base/metrics/persistent_memory_allocator_unittest.cc
+@@ -140,11 +140,12 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
+ ASSERT_TRUE(obj1);
+ Reference block1 = allocator_->GetAsReference(obj1);
+ ASSERT_NE(0U, block1);
+- EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1));
+ EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1));
+- EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
+- EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
+- allocator_->GetAllocSize(block1));
++ size_t alloc_size_1 = 0;
++ EXPECT_NE(nullptr,
++ allocator_->GetAsObject<TestObject1>(block1, &alloc_size_1));
++ EXPECT_LE(sizeof(TestObject1), alloc_size_1);
++ EXPECT_GT(sizeof(TestObject1) + kAllocAlignment, alloc_size_1);
+ PersistentMemoryAllocator::MemoryInfo meminfo1;
+ allocator_->GetMemoryInfo(&meminfo1);
+ EXPECT_EQ(meminfo0.total, meminfo1.total);
+@@ -180,11 +181,12 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
+ ASSERT_TRUE(obj2);
+ Reference block2 = allocator_->GetAsReference(obj2);
+ ASSERT_NE(0U, block2);
+- EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2));
+ EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2));
+- EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
+- EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
+- allocator_->GetAllocSize(block2));
++ size_t alloc_size_2 = 0;
++ EXPECT_NE(nullptr,
++ allocator_->GetAsObject<TestObject2>(block2, &alloc_size_2));
++ EXPECT_LE(sizeof(TestObject2), alloc_size_2);
++ EXPECT_GT(sizeof(TestObject2) + kAllocAlignment, alloc_size_2);
+ PersistentMemoryAllocator::MemoryInfo meminfo2;
+ allocator_->GetMemoryInfo(&meminfo2);
+ EXPECT_EQ(meminfo1.total, meminfo2.total);
+@@ -965,10 +967,10 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
+ uint32_t type_id;
+ Reference ref;
+ while ((ref = iter.GetNext(&type_id)) != 0) {
++ size_t size = 0;
+ const char* data = allocator.GetAsArray<char>(
+- ref, 0, PersistentMemoryAllocator::kSizeAny);
++ ref, 0, PersistentMemoryAllocator::kSizeAny, &size);
+ uint32_t type = allocator.GetType(ref);
+- size_t size = allocator.GetAllocSize(ref);
+ // Ensure compiler can't optimize-out above variables.
+ (void)data;
+ (void)type;
+diff --git a/components/metrics/persistent_system_profile.cc b/components/metrics/persistent_system_profile.cc
+index a04737dc52b0c394dc29c9d4cd274f89af5f02c1..9c29c53530872c7392607ad4cf75dbaabd60870c 100644
+--- a/components/metrics/persistent_system_profile.cc
++++ b/components/metrics/persistent_system_profile.cc
+@@ -104,7 +104,7 @@ bool PersistentSystemProfile::RecordAllocator::Write(RecordType type,
+ if (!AddSegment(remaining_size))
+ return false;
+ }
+- // Write out as much of the data as possible. |data| and |remaining_size|
++ // Write out as much of the data as possible. `data` and `remaining_size`
+ // are updated in place.
+ if (!WriteData(type, &data, &remaining_size)) + return false; +@@ -147,8 +147,7 @@ bool PersistentSystemProfile::RecordAllocator::Read(RecordType* type, + + bool PersistentSystemProfile::RecordAllocator::NextSegment() const { + base::PersistentMemoryAllocator::Iterator iter(allocator_, alloc_reference_); +- alloc_reference_ = iter.GetNextOfType(kTypeIdSystemProfile); +- alloc_size_ = allocator_->GetAllocSize(alloc_reference_); ++ alloc_reference_ = iter.GetNextOfType(kTypeIdSystemProfile, &alloc_size_); + end_offset_ = 0; + return alloc_reference_ != 0; + } +@@ -169,13 +168,15 @@ bool PersistentSystemProfile::RecordAllocator::AddSegment(size_t min_size) { + size_t size = + std::max(CalculateRecordSize(min_size), kSystemProfileAllocSize); + +- uint32_t ref = allocator_->Allocate(size, kTypeIdSystemProfile); ++ size_t new_alloc_size = 0; ++ uint32_t ref = ++ allocator_->Allocate(size, kTypeIdSystemProfile, &new_alloc_size); + if (!ref) + return false; // Allocator must be full. + allocator_->MakeIterable(ref); + + alloc_reference_ = ref; +- alloc_size_ = allocator_->GetAllocSize(ref); ++ alloc_size_ = new_alloc_size; + return true; + } + +@@ -284,7 +285,7 @@ void PersistentSystemProfile::RegisterPersistentAllocator( + base::PersistentMemoryAllocator* memory_allocator) { + DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); + +- // Create and store the allocator. A |min_size| of "1" ensures that a memory ++ // Create and store the allocator. A `min_size` of "1" ensures that a memory + // block is reserved now. + RecordAllocator allocator(memory_allocator, 1); + allocators_.push_back(std::move(allocator)); diff --git a/patches/skia/.patches b/patches/skia/.patches index 35c07619677b..87b0775e0887 100644 --- a/patches/skia/.patches +++ b/patches/skia/.patches @@ -1 +1,2 @@ sksl_rp_prevent_overflow_when_computing_slot_allocation_size.patch +ganesh_avoid_overflow_when_combining_aahairlineops.patch diff --git a/patches/skia/ganesh_avoid_overflow_when_combining_aahairlineops.patch b/patches/skia/ganesh_avoid_overflow_when_combining_aahairlineops.patch new file mode 100644 index 000000000000..af2dfe261438 --- /dev/null +++ b/patches/skia/ganesh_avoid_overflow_when_combining_aahairlineops.patch @@ -0,0 +1,62 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: James Godfrey-Kittle +Date: Tue, 17 Dec 2024 12:14:17 -0500 +Subject: [ganesh] Avoid overflow when combining AAHairlineOps + +Bug: b/382786791 +Change-Id: I955d943015cce76f75221df9fab0897a6f22fe4b +Reviewed-on: https://skia-review.googlesource.com/c/skia/+/930577 +Reviewed-by: Michael Ludwig +Commit-Queue: James Godfrey-Kittle +(cherry picked from commit 8b030e47588af50f56ef380d81a17667baeb582b) +Reviewed-on: https://skia-review.googlesource.com/c/skia/+/935337 +Reviewed-by: James Godfrey-Kittle +Auto-Submit: Michael Ludwig +Commit-Queue: Michael Ludwig + +diff --git a/src/gpu/ganesh/ops/AAHairLinePathRenderer.cpp b/src/gpu/ganesh/ops/AAHairLinePathRenderer.cpp +index dd37a8ff200a70465669720d06e13bbc0ff389f0..570eeb8faad6e86908d957320dab0876c64a473b 100644 +--- a/src/gpu/ganesh/ops/AAHairLinePathRenderer.cpp ++++ b/src/gpu/ganesh/ops/AAHairLinePathRenderer.cpp +@@ -10,6 +10,7 @@ + #include "include/core/SkPoint3.h" + #include "include/private/base/SkFloatingPoint.h" + #include "include/private/base/SkTemplates.h" ++#include "src/base/SkSafeMath.h" + #include "src/core/SkGeometry.h" + #include "src/core/SkMatrixPriv.h" + #include "src/core/SkPointPriv.h" +@@ -1179,16 +1180,28 @@ void 
AAHairlineOp::onPrepareDraws(GrMeshDrawTarget* target) { + + int instanceCount = fPaths.size(); + bool convertConicsToQuads = !target->caps().shaderCaps()->fFloatIs32Bits; +- for (int i = 0; i < instanceCount; i++) { ++ SkSafeMath safeMath; ++ for (int i = 0; i < instanceCount && safeMath.ok(); i++) { + const PathData& args = fPaths[i]; +- quadCount += gather_lines_and_quads(args.fPath, args.fViewMatrix, args.fDevClipBounds, +- args.fCapLength, convertConicsToQuads, &lines, &quads, +- &conics, &qSubdivs, &cWeights); ++ quadCount = safeMath.addInt(quadCount, ++ gather_lines_and_quads(args.fPath, ++ args.fViewMatrix, ++ args.fDevClipBounds, ++ args.fCapLength, ++ convertConicsToQuads, ++ &lines, ++ &quads, ++ &conics, ++ &qSubdivs, ++ &cWeights)); + } + + int lineCount = lines.size() / 2; + int conicCount = conics.size() / 3; +- int quadAndConicCount = conicCount + quadCount; ++ int quadAndConicCount = safeMath.addInt(conicCount, quadCount); ++ if (!safeMath.ok()) { ++ return; ++ } + + static constexpr int kMaxLines = SK_MaxS32 / kLineSegNumVertices; + static constexpr int kMaxQuadsAndConics = SK_MaxS32 / kQuadNumVertices; diff --git a/patches/v8/.patches b/patches/v8/.patches index 251e9ae3665c..82e9229e946f 100644 --- a/patches/v8/.patches +++ b/patches/v8/.patches @@ -11,4 +11,5 @@ cherry-pick-aad648bd2af9.patch cherry-pick-cb0d9e1d7b88.patch merged_wasm_arm_tail-call_free_scratch_register_earlier.patch merged_turboshaft_wasm_wasmgctypeanalyzer_fix_phi_input_for.patch -merged_turboshaft_wasm_wasmgctypeanalyzer_fix_single-block_loops.patch \ No newline at end of file +merged_turboshaft_wasm_wasmgctypeanalyzer_fix_single-block_loops.patch +merged_interpreter_fix_hole_elision_scope_for_switch_jump_tables.patch diff --git a/patches/v8/cherry-pick-aad648bd2af9.patch b/patches/v8/cherry-pick-aad648bd2af9.patch index 110007bfff2a..3128de050c81 100644 --- a/patches/v8/cherry-pick-aad648bd2af9.patch +++ b/patches/v8/cherry-pick-aad648bd2af9.patch @@ -1,7 +1,10 @@ -From aad648bd2af9815d0c48eeb78cbf3d8e6471d094 Mon Sep 17 00:00:00 2001 +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: Darius Mercadier -Date: Thu, 05 Dec 2024 16:03:33 +0100 -Subject: [PATCH] Merged: [maglev] Avoid retagging loop phi backedges too early +Date: Thu, 5 Dec 2024 16:03:33 +0100 +Subject: Merged: [maglev] Avoid retagging loop phi backedges too early +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit When we decide that a loop phi should remain tagged, we call EnsurePhiInputsTagged to ensures that it only has tagged inputs, which @@ -32,13 +35,12 @@ Reviewed-by: Camillo Bruni Cr-Commit-Position: refs/branch-heads/13.0@{#41} Cr-Branched-From: 4be854bd71ea878a25b236a27afcecffa2e29360-refs/heads/13.0.245@{#1} Cr-Branched-From: 1f5183f7ad6cca21029fd60653d075730c644432-refs/heads/main@{#96103} ---- diff --git a/src/maglev/maglev-phi-representation-selector.cc b/src/maglev/maglev-phi-representation-selector.cc -index c03974e..b4d913d 100644 +index 21952ebd08986033ff151f1ddda5904291985025..65864341c1fe582e44c1c6babd716ef38dbd559f 100644 --- a/src/maglev/maglev-phi-representation-selector.cc +++ b/src/maglev/maglev-phi-representation-selector.cc -@@ -334,7 +334,8 @@ +@@ -329,7 +329,8 @@ void MaglevPhiRepresentationSelector::EnsurePhiInputsTagged(Phi* phi) { // should be tagged. We'll thus insert tagging operation on the untagged phi // inputs of {phi}. 
@@ -50,7 +52,7 @@ index c03974e..b4d913d 100644 phi->change_input(i, EnsurePhiTagged(phi_input, phi->predecessor_at(i), diff --git a/test/mjsunit/maglev/regress-382190919.js b/test/mjsunit/maglev/regress-382190919.js new file mode 100644 -index 0000000..773f442 +index 0000000000000000000000000000000000000000..773f442cb98b914328cdd6e24a8eca1ef6d8a9d6 --- /dev/null +++ b/test/mjsunit/maglev/regress-382190919.js @@ -0,0 +1,39 @@ diff --git a/patches/v8/merged_interpreter_fix_hole_elision_scope_for_switch_jump_tables.patch b/patches/v8/merged_interpreter_fix_hole_elision_scope_for_switch_jump_tables.patch new file mode 100644 index 000000000000..a7fdb19dca32 --- /dev/null +++ b/patches/v8/merged_interpreter_fix_hole_elision_scope_for_switch_jump_tables.patch @@ -0,0 +1,106 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Shu-yu Guo +Date: Mon, 18 Nov 2024 16:02:28 -0800 +Subject: Merged: [interpreter] Fix hole elision scope for switch jump tables + +(cherry picked from commit 5c3b50c26c50e68dbedf8ff991249e75e46ef06e) + +Change-Id: Id6bf2b62598b85a05c6cc7bd06b6cce673d7342a +Bug: 374627491 +Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/6042925 +Commit-Queue: Shu-yu Guo +Commit-Queue: Rezvan Mahdavi Hezaveh +Auto-Submit: Shu-yu Guo +Reviewed-by: Rezvan Mahdavi Hezaveh +Cr-Commit-Position: refs/branch-heads/13.2@{#18} +Cr-Branched-From: 24068c59cedad9ee976ddc05431f5f497b1ebd71-refs/heads/13.2.152@{#1} +Cr-Branched-From: 6054ba94db0969220be4f94dc1677fc4696bdc4f-refs/heads/main@{#97085} + +diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc +index 05d48601ec63c7df0c1a4c351be464074ad8b262..2c28bb53c88c70fd65bc0d208542ad35490f5044 100644 +--- a/src/interpreter/bytecode-generator.cc ++++ b/src/interpreter/bytecode-generator.cc +@@ -2353,6 +2353,9 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { + // Are we still using any if-else bytecodes to evaluate the switch? + bool use_jumps = n_comp_cases != 0; + ++ // Does the comparison for non-jump table jumps need an elision scope? ++ bool jump_comparison_needs_hole_check_elision_scope = false; ++ + SwitchBuilder switch_builder(builder(), block_coverage_builder_, stmt, + n_comp_cases, jump_table); + ControlScopeForBreakable scope(this, stmt, &switch_builder); +@@ -2410,6 +2413,10 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { + info.covered_cases); + + if (use_jumps) { ++ // When using a jump table, the first jump comparison is conditionally ++ // executed if the discriminant wasn't matched by anything in the jump ++ // table, and so needs its own elision scope. ++ jump_comparison_needs_hole_check_elision_scope = true; + builder()->LoadAccumulatorWithRegister(r1); + } + } +@@ -2430,16 +2437,14 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { + // The comparisons linearly dominate, so no need to open a new elision + // scope for each one. + base::Optional elider; +- bool first_jump_emitted = false; + for (int i = 0; i < clauses->length(); ++i) { + CaseClause* clause = clauses->at(i); + if (clause->is_default()) { + info.default_case = i; + } else if (!info.CaseExists(clause->label())) { +- // The first non-default label is +- // unconditionally executed, so we only need to emplace it before +- // visiting the second non-default label. 
+- if (first_jump_emitted) elider.emplace(this); ++ if (jump_comparison_needs_hole_check_elision_scope && !elider) { ++ elider.emplace(this); ++ } + + // Perform label comparison as if via '===' with tag. + VisitForAccumulatorValue(clause->label()); +@@ -2450,7 +2455,9 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { + #endif + switch_builder.JumpToCaseIfTrue(ToBooleanMode::kAlreadyBoolean, + case_compare_ctr++); +- first_jump_emitted = true; ++ // The second and subsequent non-default comparisons are always ++ // conditionally executed, and need an elision scope. ++ jump_comparison_needs_hole_check_elision_scope = true; + } + } + } +diff --git a/test/mjsunit/regress/regress-374627491.js b/test/mjsunit/regress/regress-374627491.js +new file mode 100644 +index 0000000000000000000000000000000000000000..ebb7e1d93f788f10606b4787cfacd79c3807ca0c +--- /dev/null ++++ b/test/mjsunit/regress/regress-374627491.js +@@ -0,0 +1,26 @@ ++// Copyright 2024 the V8 project authors. All rights reserved. ++// Use of this source code is governed by a BSD-style license that can be ++// found in the LICENSE file. ++ ++class B { } ++class C extends B { ++ constructor() { ++ let x = 0; ++ switch (0) { ++ case 0: ++ case 1: ++ case 2: ++ case 3: ++ case 4: ++ case 5: ++ case 6: ++ case 7: ++ case 8: ++ case 9: ++ x += this; ++ break; ++ case this: ++ } ++ } ++} ++assertThrows(() => { new C(); }, ReferenceError);