Compare commits


1 commit

Author SHA1 Message Date
ce8b8971f5 TargetRid=linux-musl-x64 2023-04-19 12:52:40 -04:00
584 changed files with 27420 additions and 17638 deletions


@ -1,177 +0,0 @@
#!/bin/bash
# expects the following env variables:
# downstream: URL of the downstream apk repository
#
# env variables to track minor or bug-fix updates
# minor_only: array of packages that should only track minor releases (separated by spaces)
# default: none
# all packages: all
# fix_only: array of packages that should only track bug-fix releases (separated by spaces)
# default: none
# all packages: all
#
# If either minor_only or fix_only is set, only packages with semantic versioning schemes
# will be tracked.
#
# If a package is both minor_only and fix_only, the minor releases will be tracked
#
# optional env variables
# ALL_PACKAGES: when true, ignore whether a package is owned by me
# skip_package: array of packages to skip
# add_package: array of additional packages to check
#
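# example invocation (illustrative; the values mirror the workflow definitions further below):
#   downstream=https://dl-cdn.alpinelinux.org/alpine/v3.22/community \
#   fix_only=all skip_package="dotnet9-stage0 py3-boto3" ./.forgejo/bin/check_ver.sh
#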
repo=${downstream/*\/}
release=${downstream/\/$repo/}
release=${release/*\/}
release=${release/v}
arch=$(apk --print-arch)
# add special case for postmarketos
[ "$release" == "postmarketos" ] && { release=$repo; repo="pmos"; arch="aarch64"; }
[ "$release" == "master" ] && release=edge
is_semantic() {
local downstream_version_dot=${1//[^.]}
if [[ ${#downstream_version_dot} -eq 2 ]]; then
return 0
fi
return 1
}
echo "Checking $downstream for out of date packages"
curl --silent $downstream/$arch/APKINDEX.tar.gz | tar -O -zx APKINDEX > APKINDEX
if [ "$ALL_PACKAGES" == "true" ]; then
owned_by_you=$(awk -F ':' '{if($1=="o"){print $2}}' APKINDEX | sort | uniq)
echo "Found $(printf '%s\n' $owned_by_you | wc -l ) packages"
else
owned_by_you=$(awk -v RS= -v ORS="\n\n" '/m:Antoine Martin \(ayakael\) <dev@ayakael.net>/' APKINDEX | awk -F ':' '{if($1=="o"){print $2}}' | sort | uniq)
echo "Found $(printf '%s\n' $owned_by_you | wc -l ) packages owned by you"
fi
# add additional packages
owned_by_you="$owned_by_you $add_package"
rm -f out_of_date not_in_anitya
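# each out_of_date line has the format: <pkg> <downstream_version> <upstream_version> <repo> <release>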
for pkg in $owned_by_you; do
downstream_version=$(sed -n "/^P:$pkg$/,/^$/p" APKINDEX | awk -F ':' '{if($1=="V"){print $2}}' | sort -V | tail -n 1)
downstream_version=${downstream_version/-*}
# skip package if in $skip_package array
if [[ "$skip_package" == *$pkg* ]]; then
echo "$pkg skipped"
continue
fi
# special cases where the package version scheme is not semantic
case $pkg in
# track u-boot-pine64-pinenote against mainline u-boot, and track upstream rockchip blobs
u-boot-pine64-pinenote)
upstream_version="$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/Alpine/u-boot" | jq -r '.stable_versions.[]' | head -n1)"
commit=$(sed -n "/^P:$pkg$/,/^$/p" APKINDEX | awk -F ':' '{if($1=="c"){print $2}}')
commit=${commit/-dirty/}
# fetches upstream version for blobs using ini file
upstream_trust="$(curl --fail -s 'https://raw.githubusercontent.com/rockchip-linux/rkbin/master/RKTRUST/RK3566TRUST_ULTRA.ini' | grep bl31 | awk -F '=' '{if($1"="PATH){print $2}}' | grep -o -P '(?<=_v).*(?=.elf)')"
upstream_ddr="$(curl --fail -s 'https://raw.githubusercontent.com/rockchip-linux/rkbin/master/RKBOOT/RK3566MINIALL_ULTRA.ini' | grep ddr | awk -F '=' '{if($1"="PATH){print $2}}' | head -n 1 | grep -o -P '(?<=_v).*(?=.bin)')"
# extracts downstream versions via the _trust_ver and _ddr_ver variables
downstream_trust=$(curl --fail -X GET -s "https://gitlab.postmarketos.org/postmarketOS/pmaports/-/raw/$commit/device/testing/u-boot-pine64-pinenote/APKBUILD" | awk -F '=' '{if($1=="_trust_ver"){print $2}}')
downstream_ddr=$(curl --fail -X GET -s "https://gitlab.postmarketos.org/postmarketOS/pmaports/-/raw/$commit/device/testing/u-boot-pine64-pinenote/APKBUILD" | awk -F '=' '{if($1=="_ddr_ver"){print $2}}')
# compares versions and appends a line to out_of_date if they differ
if [ "$upstream_trust" != "$downstream_trust" ]; then
echo "$pkg new Trust blob $upstream_trust version available"
echo "$pkg(trust) $downstream_trust $upstream_trust $repo $release" >> out_of_date
fi
if [ "$upstream_ddr" != "$downstream_ddr" ]; then
echo "$pkg new ddr blob $upstream_ddr version available"
echo "$pkg(ddr) $downstream_ddr $upstream_ddr $repo $release" >> out_of_date
fi
;;
# release-monitoring omits the trailing 'b', while we keep it appended after the version number
looking-glass) upstream_version="$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/Alpine/$pkg" | jq -r '.stable_versions.[]' | head -n1)b";;
# we want to track both Firefox security upgrades + Zotero upgrades
zotero)
commit=$(sed -n "/^P:$pkg$/,/^$/p" APKINDEX | awk -F ':' '{if($1=="c"){print $2}}')
downstream_fx_ver=$(curl --fail -X GET -s "https://gitlab.alpinelinux.org/alpine/aports/-/raw/$commit/community/zotero/APKBUILD" | awk -F '=' '{if($1=="_fxver"){print $2}}')
upstream_fx_ver=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/Alpine/firefox-esr" | jq -r ".stable_versions.[] | match(\"${downstream_fx_ver/.*.*}.*\").string" | head -n1)
if [ "$upstream_fx_ver" != "$downstream_fx_ver" ]; then
echo "$pkg new Firefox $upstream_fx_ver version available"
echo "$pkg(fx_ver) $downstream_fx_ver $upstream_fx_ver $repo $release" >> out_of_date
fi
upstream_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/Alpine/$pkg" | jq -r '.stable_versions.[]' | head -n1)
;;
# aports omits the -beta part of the version
freetube) upstream_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/v2/packages/?name=$pkg&distribution=Alpine" | jq -r '.items.[].version' | sed "s|-beta||");;
# we only track x.x.1xx feature branches of SDK and stage0
dotnet*sdk|dotnet*stage0) upstream_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/Alpine/$pkg" | jq -r ".stable_versions.[] | match(\"${downstream_version::-2}.*\").string" | sed 's|-.*||' | head -n1);;
# we want to track both current major version and upstream latest
electron)
upstream_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/v2/projects/?name=$pkg&distribution=Alpine" | jq -r '.items.[].stable_versions' | jq -r ".[] | match(\"${downstream_version/.*}.*\").string" | head -n 1)
latest_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/v2/packages/?name=$pkg&distribution=Alpine" | jq -r '.items.[].stable_version' )
# append major version to the package name to signal that this is not the latest major version
if [ "${upstream_version/.*}" != "${latest_version/.*}" ]; then
echo "$pkg(${latest_version/.*}) major version available"
echo "$pkg(${latest_version/.*}) $downstream_version $latest_version $repo $release" >> out_of_date
pkg="$pkg(${upstream_version/.*})"
fi
;;
# we want to track LTS rather than latest
arm-trusted-firmware) upstream_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/Alpine/$pkg" | jq -r ".stable_versions.[] | match(\"${downstream_version%.*}.*\").string" | head -n1);;
# track linux-pine64-pinenote against latest
linux-pine64-pinenote)
upstream_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/315000" | jq -r ".stable_versions.[] | match(\"${downstream_version%.*}.*\").string" | head -n1)
latest_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/315000" | jq -r ".stable_versions.[]" | head -n1)
# append major version to the package name to signal that this is not the latest major version
if [ "${upstream_version/.*.*}" != "${latest_version/.*.*}" ]; then
echo "$pkg(${latest_version/.*.*}) major version available"
echo "$pkg(${latest_version/.*.*}) $downstream_version $latest_version $repo $release" >> out_of_date
pkg="$pkg(${upstream_version%.*})"
fi
;;
# track linux-radxa against the BSP kernel (which usually gets updated awfully late)
linux-radxa)
upstream_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/Alpine/$pkg" | jq -r '.stable_versions.[]' | head -n1)
upstream_version=${upstream_version/-*}
;;
# strip the trailing part of the GitHub tag from the usbboot release, as it is not needed
raspberrypi-usbboot) upstream_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/Alpine/$pkg" | jq -r '.stable_versions.[]' | head -n1 | sed 's|-.*||');;
*)
# skip the package when its version scheme is not semantic but minor_only or fix_only is set
if [ -n "${minor_only}" ] || [ -n "${fix_only}" ]; then
if ! is_semantic $downstream_version; then
echo "$pkg is not semantic, and fix_only or minor_only is set"
continue
fi
fi
if [ "${minor_only}" == "all" ] || [[ "${minor_only}" == *$pkg* ]]; then
upstream_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/Alpine/$pkg" | jq -r ".stable_versions.[] | match(\"${downstream_version%.*.*}.*\").string" | head -n1)
elif [ "${fix_only}" == "all" ] || [[ "${fix_only}" == *$pkg* ]]; then
upstream_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/Alpine/$pkg" | jq -r ".stable_versions.[] | match(\"${downstream_version%.*}.*\").string" | head -n1)
else
upstream_version=$(curl --fail -X GET -s -H 'Content-Type: application/json' "https://release-monitoring.org/api/project/Alpine/$pkg" | jq -r '.stable_versions.[]' | head -n1)
fi
;;
esac
if [ -z "$upstream_version" ]; then
echo "$pkg not in anitya"
# do not track not_in_anitya if either minor_only or fix_only is set
if [ -z ${minor_only+x} ] && [ -z ${fix_only+x} ]; then
echo "$pkg" >> not_in_anitya
fi
elif [ "$downstream_version" != "$(printf '%s\n' $upstream_version $downstream_version | sort -V | head -n 1)" ]; then
echo "$pkg higher downstream $upstream_version"
continue
elif [ "$upstream_version" != "$downstream_version" ]; then
echo "$pkg upstream version $upstream_version does not match downstream version $downstream_version in $release"
echo "$pkg $downstream_version $upstream_version $repo $release" >> out_of_date
fi
done


@ -1,22 +0,0 @@
#!/bin/sh
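# deletes every package of every arch from the given Forgejo apk repository
# usage (illustrative): FORGE_REPO_USER=... FORGE_REPO_TOKEN=... clear-repo.sh <repo URL>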
TARGET_REPO=$1
ARCH="x86 x86_64 armhf armv7 aarch64 ppc64le s390x mips64 riscv64 loongarch64"
for arch in $ARCH; do
# check if repo exists
wget --spider $TARGET_REPO/$arch/APKINDEX.tar.gz -o /dev/null || continue
echo ">>> Clearing repo $TARGET_REPO/$arch"
curl --silent $TARGET_REPO/$arch/APKINDEX.tar.gz | tar -O -zx APKINDEX > APKINDEX
pkgs=$(awk -F ':' '{if($1=="o"){print $2}}' APKINDEX | sort | uniq)
for pkg in $pkgs; do
pkgvers=$(sed -n "/^P:$pkg$/,/^$/p" APKINDEX | awk -F ':' '{if($1=="V"){print $2}}')
for pkgver in $pkgvers; do
echo "Deleting $pkg-$pkgver of arch $arch from $TARGET_REPO"
curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$arch/$pkg-$pkgver.apk
done
done
done


@ -1,189 +0,0 @@
#!/bin/bash
# expects:
# env variables: ISSUE_TOKEN, LABEL_NUMBER, downstream, GITHUB_SERVER_URL, GITHUB_REPOSITORY
# files: out_of_date and/or not_in_anitya (as produced by check_ver.sh)
IFS='
'
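# IFS is set to newline only, so every line of out_of_date is processed as a single loop item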
repo=${downstream/*\/}
does_it_exist() {
name=$1
downstream_version=$2
upstream_version=$3
repo=$4
release=$5
query="$repo/$name: upgrade to $upstream_version"
if [ "$release" != "edge" ]; then
query="%22[$release] $query%22"
elif [ "$repo" != "pmos" ] && [ "$repo" != "user" ]; then
# workaround: without the Edge label this query would match both the stable and edge branches
query="%22$query%22&labels=Edge"
else
query="%22$query%22"
fi
query="$(echo $query | sed 's| |%20|g' | sed 's|:|%3A|g' | sed 's|/|%2F|g' | sed 's|\[|%5B|g' | sed 's|\]|%5D|g')"
result="$(curl --silent -X 'GET' \
"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues?state=open&q=$query&type=issues&sort=latest" \
-H 'accept: application/json' \
-H "Authorization: token $ISSUE_TOKEN"
)"
if [ "$result" == "[]" ]; then
return 1
fi
}
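# illustrative example (hypothetical package): with repo=user, name=foo, upstream_version=1.2.3
# and release=v3.22, the search query becomes
#   %22%5Bv3.22%5D%20user%2Ffoo%3A%20upgrade%20to%201.2.3%22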
is_it_old() {
name=$1
downstream_version=$2
upstream_version=$3
repo=$4
release=$5
query="$repo/$name: upgrade to"
if [ "$release" != "edge" ]; then
query="%22[$release] $query%22"
elif [ "$repo" != "pmos" ] && [ "$repo" != "user" ]; then
# workaround: without the Edge label this query would match both the stable and edge branches
query="%22$query%22&labels=Edge"
else
query="%22$query%22"
fi
query="$(echo $query | sed 's| |%20|g' | sed 's|:|%3A|g' | sed 's|/|%2F|g' | sed 's|\[|%5B|g' | sed 's|\]|%5D|g')"
result="$(curl --silent -X 'GET' \
"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues?state=open&q=$query&type=issues&sort=latest" \
-H 'accept: application/json' \
-H "authorization: token $ISSUE_TOKEN"
)"
result_title="$(echo $result | jq -r '.[].title' )"
result_id="$(echo $result | jq -r '.[].number' )"
result_upstream_version="$(echo $result_title | awk '{print $4}')"
if [ "$upstream_version" != "$result_upstream_version" ]; then
echo $result_id
else
echo 0
fi
}
update_title() {
name=$1
downstream_version=$2
upstream_version=$3
repo=$4
release=$5
id=$6
result=$(curl --silent -X 'PATCH' \
"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues/$id" \
-H 'accept: application/json' \
-H "authorization: token $ISSUE_TOKEN" \
-H 'Content-Type: application/json' \
-d "{
\"title\": \"$repo/$name: upgrade to $upstream_version\"
}"
)
return 0
}
create_issue() {
name=$1
downstream_version=$2
upstream_version=$3
repo=$4
release=$5
title="$repo/$name: upgrade to $upstream_version"
if [ "$release" != "edge" ]; then title="[$release] $title"; fi
result=$(curl --silent -X 'POST' \
"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues" \
-H 'accept: application/json' \
-H "authorization: token $ISSUE_TOKEN" \
-H 'Content-Type: application/json' \
-d "{
\"title\": \"$title\",
\"labels\": [
$LABEL_NUMBER
]
}")
return 0
}
if [ -f out_of_date ]; then
out_of_date="$(cat out_of_date)"
echo "Detected $(wc -l out_of_date) out-of-date packages, creating issues"
for pkg in $out_of_date; do
name="$(echo $pkg | awk '{print $1}')"
downstream_version="$(echo $pkg | awk '{print $2}')"
upstream_version="$(echo $pkg | awk '{print $3}')"
repo="$(echo $pkg | awk '{print $4}')"
release="$(echo $pkg | awk '{print $5}')"
if does_it_exist $name $downstream_version $upstream_version $repo $release; then
echo "Issue for $repo/$name already exists"
continue
fi
id=$(is_it_old $name $downstream_version $upstream_version $repo $release)
if [ "$id" != "0" ] && [ -n "$id" ]; then
echo "Issue for $repo/$name needs updating"
update_title $name $downstream_version $upstream_version $repo $release $id
continue
fi
echo "Creating issue for $repo/$name"
create_issue $name $downstream_version $upstream_version $repo $release
done
fi
if [ -f not_in_anitya ]; then
query="Add missing $repo packages to anitya"
query="%22$(echo $query | sed 's| |%20|g')%22"
result="$(curl --silent -X 'GET' \
"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues?state=open&q=$query&type=issues&sort=latest" \
-H 'accept: application/json' \
-H "authorization: token $ISSUE_TOKEN"
)"
if [ "$result" == "[]" ]; then
echo "Creating anitya issue"
result=$(curl --silent -X 'POST' \
"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues" \
-H 'accept: application/json' \
-H "authorization: token $ISSUE_TOKEN" \
-H 'Content-Type: application/json' \
-d "{
\"title\": \"Add missing $repo packages to anitya\",
\"body\": \"- [ ] $(sed '{:q;N;s/\n/\\n- [ ] /g;t q}' not_in_anitya)\",
\"labels\": [
$LABEL_NUMBER
]
}")
else
echo "Updating anitya issue"
result_id="$(echo $result | jq -r '.[].number' )"
result=$(curl --silent -X 'PATCH' \
"$GITHUB_SERVER_URL/api/v1/repos/$GITHUB_REPOSITORY/issues/$result_id" \
-H 'accept: application/json' \
-H "authorization: token $ISSUE_TOKEN" \
-H 'Content-Type: application/json' \
-d "{
\"body\": \"- [ ] $(sed '{:q;N;s/\n/\\n- [ ] /g;t q}' not_in_anitya)\"
}"
)
fi
fi


@ -1,26 +0,0 @@
#!/bin/sh
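# uploads built .apk files to the Forgejo package repository
# expects env variables: CI_ALPINE_REPO, CI_MERGE_REQUEST_TARGET_BRANCH_NAME,
# FORGE_REPO_USER and FORGE_REPO_TOKEN (set by the deploy jobs in the workflows)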
# shellcheck disable=SC3040
set -eu -o pipefail
readonly REPOS="backports user"
readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME
readonly TARGET_REPO=$CI_ALPINE_REPO
apkgs=$(find package -type f -name "*.apk")
for apk in $apkgs; do
branch=$(echo $apk | awk -F '/' '{print $2}')
arch=$(echo $apk | awk -F '/' '{print $3}')
name=$(echo $apk | awk -F '/' '{print $4}')
echo "Sending $name of arch $arch to $TARGET_REPO/$BASEBRANCH/$branch"
return=$(curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch 2>&1)
echo $return
if [ "$return" == "package file already exists" ]; then
echo "Package already exists, refreshing..."
curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN -X DELETE $TARGET_REPO/$BASEBRANCH/$branch/$arch/$name
curl -s --user $FORGE_REPO_USER:$FORGE_REPO_TOKEN --upload-file $apk $TARGET_REPO/$BASEBRANCH/$branch
fi
done


@ -1,54 +0,0 @@
#!/bin/bash
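# builds cross-compilers for the given arches via the aports bootstrap.sh script
# usage (illustrative): generate-cross.sh [release] [arch ...], e.g. generate-cross.sh v3.22 aarch64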
TARGET_RELEASE=$1
shift
TARGET_ARCH=$@
CURRENT_ARCH=$(cat /etc/apk/arch)
if [ -z "$TARGET_RELEASE" ]; then
echo ">>> No target release specified, assumming edge"
TARGET_RELEASE=edge
fi
[[ "$TARGET_RELEASE" == "edge" ]] && TARGET_BRANCH=master || TARGET_BRANCH="${TARGET_RELEASE/v}-stable"
if [[ ! -d "aports-$TARGET_RELEASE" ]]; then
echo ">>> Fetching aports for $TARGET_RELEASE"
git init aports-$TARGET_RELEASE
git -C aports-$TARGET_RELEASE remote add origin https://gitlab.alpinelinux.org/alpine/aports
git -C aports-$TARGET_RELEASE fetch --depth 1 origin $TARGET_BRANCH
git -C aports-$TARGET_RELEASE checkout $TARGET_BRANCH
[[ $? -ne 0 ]] && { echo ">>> Git fetch failed, does your release exist?"; exit; } || true
fi
if [ -z "$TARGET_ARCH" ]; then
echo ">>> No arch specified, assuming target to all arches supported by upstream for release $TARGET_RELEASE"
TARGET_ARCH=$(cat aports-$TARGET_RELEASE/scripts/mkimg.minirootfs.sh | tr -d "\t" | awk -F "=" '{if($1=="arch"){print $2}}' | tr -d \" | sed "s| $CURRENT_ARCH||")
if [ -z "$TARGET_ARCH" ]; then
echo ">>> Could not compute arches that are supported, does your release exist?"
exit
fi
fi
. /usr/share/abuild/functions.sh
for arch in $TARGET_ARCH; do
if [[ "$(arch_to_hostspec $arch)" == "unknown" ]]; then
echo ">>> $arch not valid arch, please chose among the following"
sed -n '/^arch_to_hostspec/,/esac$/ {s/esac//;p;}' /usr/share/abuild/functions.sh | sed -e '/unknown/d' -e '/arch/d' -e '/case/d' -e "/$CURRENT_ARCH/d" | awk '{print $1}' | tr -d ')'
exit
fi
done
echo ">>> Targetting $TARGET_ARCH for cross generation"
(
cd aports-$TARGET_RELEASE/scripts
# this stops bootstrap from building the whole base system
sed -i 's|^msg "Cross building base system"|exit; msg "Cross building base system"|' bootstrap.sh
for arch in $TARGET_ARCH; do
echo ">>> Building cross-compilers for $arch"
./bootstrap.sh $arch
[[ $? -ne 0 ]] && { echo ">>> Cross-build failure"; exit; } || true
done
echo ">>> Building done"
)


@ -1,54 +0,0 @@
on:
pull_request:
types: [ assigned, opened, synchronize, reopened ]
jobs:
build-aarch64:
runs-on: aarch64
container:
image: alpinelinux/alpine-gitlab-ci:latest
env:
CI_PROJECT_DIR: ${{ github.workspace }}
CI_DEBUG_BUILD: ${{ runner.debug }}
CI_MERGE_REQUEST_PROJECT_URL: ${{ github.server_url }}/${{ github.repository }}
CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.base_ref }}
steps:
- name: Environment setup
run: |
doas apk upgrade -a
doas apk add nodejs git patch curl
cd /etc/apk/keys
doas curl -JO https://ayakael.net/api/packages/forge/alpine/key
- name: Repo pull
uses: actions/checkout@v4
with:
fetch-depth: 500
- name: Package build
run: |
${{ github.workspace }}/.forgejo/bin/build.sh
touch packages/dummy
- name: Package upload
uses: forgejo/upload-artifact@v3
with:
name: package
path: packages
deploy-aarch64:
needs: [build-aarch64]
runs-on: aarch64
container:
image: alpine:latest
env:
CI_ALPINE_REPO: 'https://ayakael.net/api/packages/forge/alpine'
CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.base_ref }}
FORGE_REPO_TOKEN: ${{ secrets.FORGE_REPO_TOKEN }}
FORGE_REPO_USER: ${{ vars.FORGE_REPO_USER }}
steps:
- name: Setting up environment
run: apk add nodejs curl findutils git gawk
- name: Repo pull
uses: actions/checkout@v4
- name: Package download
uses: forgejo/download-artifact@v3
- name: Package deployment
run: ${{ github.workspace }}/.forgejo/bin/deploy.sh


@ -1,59 +0,0 @@
on:
workflow_dispatch:
inputs:
target_arch:
description: 'target arch'
required: false
type: string
jobs:
build-cross:
runs-on: x86_64
container:
image: alpinelinux/alpine-gitlab-ci:latest
env:
CI_PROJECT_DIR: ${{ github.workspace }}
CI_DEBUG_BUILD: ${{ runner.debug }}
CI_MERGE_REQUEST_PROJECT_URL: ${{ github.server_url }}/${{ github.repository }}
CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.ref_name }}
steps:
- name: Environment setup
run: |
doas apk upgrade -a
doas apk add nodejs git patch curl bash
cd /etc/apk/keys
doas curl -JO https://ayakael.net/api/packages/forge/alpine/key
- name: Repo pull
uses: actions/checkout@v4
with:
fetch-depth: 500
- name: Package build
run: |
${{ github.workspace }}/.forgejo/bin/build.sh
${{ github.workspace }}/.forgejo/bin/generate-cross.sh ${{ github.ref_name }} ${{ inputs.target_arch }}
mv -v /home/buildozer/packages/main ${{ github.workspace }}/packages/cross
- name: Package upload
uses: forgejo/upload-artifact@v3
with:
name: package
path: packages
deploy-cross:
needs: [build-cross]
runs-on: x86_64
container:
image: alpine:latest
env:
CI_ALPINE_REPO: 'https://ayakael.net/api/packages/forge/alpine'
CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.ref_name }}
FORGE_REPO_TOKEN: ${{ secrets.FORGE_REPO_TOKEN }}
FORGE_REPO_USER: ${{ vars.FORGE_REPO_USER }}
steps:
- name: Setting up environment
run: apk add nodejs curl findutils git gawk
- name: Repo pull
uses: actions/checkout@v4
- name: Package download
uses: forgejo/download-artifact@v3
- name: Package deployment
run: ${{ github.workspace }}/.forgejo/bin/deploy.sh


@ -1,54 +0,0 @@
on:
pull_request:
types: [ assigned, opened, synchronize, reopened ]
jobs:
build-x86_64:
runs-on: x86_64
container:
image: alpinelinux/alpine-gitlab-ci:latest
env:
CI_PROJECT_DIR: ${{ github.workspace }}
CI_DEBUG_BUILD: ${{ runner.debug }}
CI_MERGE_REQUEST_PROJECT_URL: ${{ github.server_url }}/${{ github.repository }}
CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.base_ref }}
steps:
- name: Environment setup
run: |
doas apk upgrade -a
doas apk add nodejs git patch curl
cd /etc/apk/keys
doas curl -JO https://ayakael.net/api/packages/forge/alpine/key
- name: Repo pull
uses: actions/checkout@v4
with:
fetch-depth: 500
- name: Package build
run: |
${{ github.workspace }}/.forgejo/bin/build.sh
touch packages/dummy
- name: Package upload
uses: forgejo/upload-artifact@v3
with:
name: package
path: packages
deploy-x86_64:
needs: [build-x86_64]
runs-on: x86_64
container:
image: alpine:latest
env:
CI_ALPINE_REPO: 'https://ayakael.net/api/packages/forge/alpine'
CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.base_ref }}
FORGE_REPO_TOKEN: ${{ secrets.FORGE_REPO_TOKEN }}
FORGE_REPO_USER: ${{ vars.FORGE_REPO_USER }}
steps:
- name: Setting up environment
run: apk add nodejs curl findutils git gawk
- name: Repo pull
uses: actions/checkout@v4
- name: Package download
uses: forgejo/download-artifact@v3
- name: Package deployment
run: ${{ github.workspace }}/.forgejo/bin/deploy.sh


@ -1,28 +0,0 @@
on:
workflow_dispatch:
schedule:
- cron: '0 5 * * *'
jobs:
check-backports:
name: Check backports repo
runs-on: x86_64
container:
image: alpine:latest
env:
downstream: https://ayakael.net/api/packages/forge/alpine/v3.22/backports
ISSUE_TOKEN: ${{ secrets.issue_token }}
LABEL_NUMBER: 1
ALL_PACKAGES: true
steps:
- name: Environment setup
run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
- name: Get scripts
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Check out-of-date packages
run: ${{ github.workspace }}/.forgejo/bin/check_ver.sh
- name: Create issues
run: ${{ github.workspace }}/.forgejo/bin/create_issue.sh


@ -1,49 +0,0 @@
on:
workflow_dispatch:
schedule:
- cron: '0 5 * * *'
jobs:
check-community-edge:
name: Check community(edge) repo
runs-on: x86_64
container:
image: alpine:latest
env:
downstream: https://dl-cdn.alpinelinux.org/alpine/edge/community
ISSUE_TOKEN: ${{ secrets.issue_token }}
LABEL_NUMBER: 4
steps:
- name: Environment setup
run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
- name: Get scripts
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Check out-of-date packages
run: ${{ github.workspace }}/.forgejo/bin/check_ver.sh
- name: Create issues
run: ${{ github.workspace }}/.forgejo/bin/create_issue.sh
check-community-3.22:
name: Check community(3.22) repo
runs-on: x86_64
container:
image: alpine:latest
env:
downstream: https://dl-cdn.alpinelinux.org/alpine/v3.22/community
ISSUE_TOKEN: ${{ secrets.issue_token }}
LABEL_NUMBER: 13
fix_only: all
skip_package: dotnet9-stage0 dotnet8-stage0 py3-boto3 py3-botocore
steps:
- name: Environment setup
run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
- name: Get scripts
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Check out-of-date packages
run: ${{ github.workspace }}/.forgejo/bin/check_ver.sh
- name: Create issues
run: ${{ github.workspace }}/.forgejo/bin/create_issue.sh


@ -1,111 +0,0 @@
on:
workflow_dispatch:
schedule:
- cron: '0 5 * * *'
jobs:
check-main-edge:
name: Check main(edge) repo
runs-on: x86_64
container:
image: alpine:latest
env:
downstream: https://dl-cdn.alpinelinux.org/alpine/edge/main
ISSUE_TOKEN: ${{ secrets.issue_token }}
LABEL_NUMBER: 4
steps:
- name: Environment setup
run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
- name: Get scripts
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Check out-of-date packages
run: ${{ github.workspace }}/.forgejo/bin/check_ver.sh
- name: Create issues
run: ${{ github.workspace }}/.forgejo/bin/create_issue.sh
check-main-3.22:
name: Check main(3.22) repo
runs-on: x86_64
container:
image: alpine:latest
env:
downstream: https://dl-cdn.alpinelinux.org/alpine/v3.22/main
ISSUE_TOKEN: ${{ secrets.issue_token }}
LABEL_NUMBER: 13
fix_only: all
steps:
- name: Environment setup
run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
- name: Get scripts
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Check out-of-date packages
run: ${{ github.workspace }}/.forgejo/bin/check_ver.sh
- name: Create issues
run: ${{ github.workspace }}/.forgejo/bin/create_issue.sh
check-main-3.21:
name: Check main(3.21) repo
runs-on: x86_64
container:
image: alpine:latest
env:
downstream: https://dl-cdn.alpinelinux.org/alpine/v3.21/main
ISSUE_TOKEN: ${{ secrets.issue_token }}
LABEL_NUMBER: 13
fix_only: all
steps:
- name: Environment setup
run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
- name: Get scripts
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Check out-of-date packages
run: ${{ github.workspace }}/.forgejo/bin/check_ver.sh
- name: Create issues
run: ${{ github.workspace }}/.forgejo/bin/create_issue.sh
check-main-3.20:
name: Check main(3.20) repo
runs-on: x86_64
container:
image: alpine:latest
env:
downstream: https://dl-cdn.alpinelinux.org/alpine/v3.20/main
ISSUE_TOKEN: ${{ secrets.issue_token }}
LABEL_NUMBER: 13
fix_only: all
steps:
- name: Environment setup
run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
- name: Get scripts
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Check out-of-date packages
run: ${{ github.workspace }}/.forgejo/bin/check_ver.sh
- name: Create issues
run: ${{ github.workspace }}/.forgejo/bin/create_issue.sh
check-main-3.19:
name: Check main(3.19) repo
runs-on: x86_64
container:
image: alpine:latest
env:
downstream: https://dl-cdn.alpinelinux.org/alpine/v3.19/main
ISSUE_TOKEN: ${{ secrets.issue_token }}
LABEL_NUMBER: 13
fix_only: all
steps:
- name: Environment setup
run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
- name: Get scripts
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Check out-of-date packages
run: ${{ github.workspace }}/.forgejo/bin/check_ver.sh
- name: Create issues
run: ${{ github.workspace }}/.forgejo/bin/create_issue.sh


@ -1,28 +0,0 @@
on:
workflow_dispatch:
schedule:
- cron: '0 5 * * *'
jobs:
check-community-pmos:
name: Check pmos(edge) repo
runs-on: x86_64
container:
image: alpine:latest
env:
downstream: http://mirror.postmarketos.org/postmarketos/master
ISSUE_TOKEN: ${{ secrets.issue_token }}
LABEL_NUMBER: 14
skip_package: device-clockworkpi-uconsole-radxa-cm5 device-pine64-pinenote u-boot-radxa-cm5
steps:
- name: Environment setup
run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
- name: Get scripts
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Check out-of-date packages
run: ${{ github.workspace }}/.forgejo/bin/check_ver.sh
- name: Create issues
run: ${{ github.workspace }}/.forgejo/bin/create_issue.sh


@ -1,28 +0,0 @@
on:
workflow_dispatch:
schedule:
- cron: '0 5 * * *'
jobs:
check-community:
name: Check testing repo
runs-on: x86_64
container:
image: alpine:latest
env:
downstream: https://dl-cdn.alpinelinux.org/alpine/edge/testing
ISSUE_TOKEN: ${{ secrets.issue_token }}
LABEL_NUMBER: 4
skip_package: dotnet6-stage0 dotnet6-build
steps:
- name: Environment setup
run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
- name: Get scripts
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Check out-of-date packages
run: ${{ github.workspace }}/.forgejo/bin/check_ver.sh
- name: Create issues
run: ${{ github.workspace }}/.forgejo/bin/create_issue.sh


@ -1,27 +0,0 @@
on:
workflow_dispatch:
schedule:
- cron: '0 5 * * *'
jobs:
check-user:
name: Check user repo
runs-on: x86_64
container:
image: alpine:latest
env:
downstream: https://ayakael.net/api/packages/forge/alpine/edge/user
ISSUE_TOKEN: ${{ secrets.issue_token }}
LABEL_NUMBER: 12
steps:
- name: Environment setup
run: apk add grep coreutils gawk curl wget bash nodejs git jq sed
- name: Get scripts
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Check out-of-date packages
run: ${{ github.workspace }}/.forgejo/bin/check_ver.sh
- name: Create issues
run: ${{ github.workspace }}/.forgejo/bin/create_issue.sh


@ -1,25 +0,0 @@
on:
workflow_dispatch:
inputs:
target_repo:
description: 'target repo'
default: 'edge/user'
required: true
type: string
jobs:
clear-repo:
runs-on: x86_64
container:
image: alpine:latest
env:
TARGET_REPO: 'https://ayakael.net/api/packages/forge/alpine/${{ inputs.target_repo }}'
FORGE_REPO_TOKEN: ${{ secrets.FORGE_REPO_TOKEN }}
FORGE_REPO_USER: ${{ vars.FORGE_REPO_USER }}
steps:
- name: Setting up environment
run: apk add nodejs curl findutils git gawk
- name: Repo pull
uses: actions/checkout@v4
- name: Clear repo
run: ${{ github.workspace }}/.forgejo/bin/clear-repo.sh $TARGET_REPO


@ -1,23 +0,0 @@
on:
pull_request:
types: [ assigned, opened, synchronize, reopened ]
jobs:
lint:
run-name: lint
runs-on: x86_64
container:
image: alpinelinux/apkbuild-lint-tools:latest
env:
CI_PROJECT_DIR: ${{ github.workspace }}
CI_DEBUG_BUILD: ${{ runner.debug }}
CI_MERGE_REQUEST_PROJECT_URL: ${{ github.server_url }}/${{ github.repository }}
CI_MERGE_REQUEST_TARGET_BRANCH_NAME: ${{ github.base_ref }}
steps:
- run: |
doas apk upgrade -a
doas apk add nodejs git
- uses: actions/checkout@v4
with:
fetch-depth: 500
- run: lint

.gitlab-ci.yml Normal file

@ -0,0 +1,60 @@
stages:
- verify
- build
- deploy
variables:
GIT_STRATEGY: clone
GIT_DEPTH: "500"
lint:
stage: verify
interruptible: true
script:
- |
sudo apk add shellcheck atools doas abuild
export PATH="$PATH:$CI_PROJECT_DIR/.gitlab/bin"
lint
allow_failure: true
only:
- merge_requests
tags:
- apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME
build:
stage: build
interruptible: true
script:
- |
sudo apk add alpine-sdk lua-aports doas
doas addgroup $USER abuild
export PATH="$PATH:$CI_PROJECT_DIR/.gitlab/bin"
sudo -Eu $USER build.sh
artifacts:
paths:
- packages/
- keys/
- logs/
expire_in: 7 days
when: always
only:
- merge_requests
tags:
- apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME
push:
interruptible: true
stage: deploy
needs:
- job: build
artifacts: true
script:
- |
sudo apk add abuild git-lfs
export PATH="$PATH:$CI_PROJECT_DIR/.gitlab/bin"
push.sh
rules:
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
when: manual
tags:
- apk-$CI_MERGE_REQUEST_TARGET_BRANCH_NAME

.gitlab/bin/APKBUILD_SHIM Executable file

@ -0,0 +1,111 @@
#!/bin/sh
set -e
arch=
builddir=
checkdepends=
depends=
depends_dev=
depends_doc=
depends_libs=
depends_openrc=
depends_static=
install=
install_if=
langdir=
ldpath=
license=
makedepends=
makedepends_build=
makedepends_host=
md5sums=
options=
patch_args=
pkgbasedir=
pkgdesc=
pkgdir=
pkgname=
pkgrel=
pkgver=
pkggroups=
pkgusers=
provides=
provider_priority=
replaces=
sha256sums=
sha512sums=
sonameprefix=
source=
srcdir=
startdir=
subpackages=
subpkgdir=
subpkgname=
triggers=
url=
# abuild.conf
CFLAGS=
CXXFLAGS=
CPPFLAGS=
LDFLAGS=
JOBS=
MAKEFLAGS=
CMAKE_CROSSOPTS=
. ./APKBUILD
: "$arch"
: "$builddir"
: "$checkdepends"
: "$depends"
: "$depends_dev"
: "$depends_doc"
: "$depends_libs"
: "$depends_openrc"
: "$depends_static"
: "$install"
: "$install_if"
: "$langdir"
: "$ldpath"
: "$license"
: "$makedepends"
: "$makedepends_build"
: "$makedepends_host"
: "$md5sums"
: "$options"
: "$patch_args"
: "$pkgbasedir"
: "$pkgdesc"
: "$pkgdir"
: "$pkgname"
: "$pkgrel"
: "$pkgver"
: "$pkggroups"
: "$pkgusers"
: "$provides"
: "$provider_priority"
: "$replaces"
: "$sha256sums"
: "$sha512sums"
: "$sonameprefix"
: "$source"
: "$srcdir"
: "$startdir"
: "$subpackages"
: "$subpkgdir"
: "$subpkgname"
: "$triggers"
: "$url"
# abuild.conf
: "$CFLAGS"
: "$CXXFLAGS"
: "$CPPFLAGS"
: "$LDFLAGS"
: "$JOBS"
: "$MAKEFLAGS"
: "$CMAKE_CROSSOPTS"

.gitlab/bin/apkbuild-shellcheck Executable file

@ -0,0 +1,16 @@
#!/bin/sh
shellcheck -s ash \
-e SC3043 \
-e SC3057 \
-e SC3060 \
-e SC2016 \
-e SC2086 \
-e SC2169 \
-e SC2155 \
-e SC2100 \
-e SC2209 \
-e SC2030 \
-e SC2031 \
-e SC1090 \
-xa $CI_PROJECT_DIR/.gitlab/bin/APKBUILD_SHIM


@ -1,23 +1,21 @@
#!/bin/sh
# shellcheck disable=SC3043
. /usr/local/lib/functions.sh
. $CI_PROJECT_DIR/.gitlab/bin/functions.sh
# shellcheck disable=SC3040
set -eu -o pipefail
readonly APORTSDIR=$CI_PROJECT_DIR
readonly REPOS="backports user pmos"
readonly REPOS="backports user"
readonly ALPINE_REPOS="main community testing"
readonly ARCH=$(apk --print-arch)
# gitlab variables
readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME
: "${REPODEST:=$HOME/packages}"
: "${MIRROR:=https://ayakael.net/api/packages/forge/alpine}"
: "${MIRROR:=https://lab.ilot.io/ayakael/repo-apk/-/raw}"
: "${ALPINE_MIRROR:=http://dl-cdn.alpinelinux.org/alpine}"
: "${PMOS_MIRROR:=http://mirror.postmarketos.org/postmarketos}"
: "${PMOS_KEY:=https://git.syndicate-lang.org/synit/pmbootstrap/raw/commit/8efee86388408c0d8de45c64fe383580ffd91700/pmb/data/keys/build.postmarketos.org.rsa.pub}"
: "${MAX_ARTIFACT_SIZE:=300000000}" #300M
: "${CI_DEBUG_BUILD:=}"
@ -72,7 +70,7 @@ report() {
get_release() {
case $BASEBRANCH in
v*) echo "$BASEBRANCH";;
v*) echo "${BASEBRANCH%-*}";;
edge) echo edge;;
*) die "Branch \"$BASEBRANCH\" not supported!"
esac
@ -82,7 +80,7 @@ build_aport() {
local repo="$1" aport="$2"
cd "$APORTSDIR/$repo/$aport"
if abuild -r 2>&1 | report "build-$aport"; then
checkapk 2>&1 | report "checkapk-$aport" || true
checkapk | report "checkapk-$aport" || true
aport_ok="$aport_ok $repo/$aport"
else
aport_ng="$aport_ng $repo/$aport"
@ -104,13 +102,11 @@ set_repositories_for() {
release=$(get_release)
for repo in $REPOS; do
[ "$repo" = "non-free" ] && continue
[ "$release" == "edge" ] && [ "$repo" == "backports" ] && continue
repos="$repos $MIRROR/$release/$repo $REPODEST/$repo"
[ "$repo" = "$target_repo" ] && break
done
doas sh -c "printf '%s\n' $repos >> /etc/apk/repositories"
doas apk update || true
doas apk update
}
apply_offset_limit() {
@ -131,30 +127,19 @@ setup_system() {
repos="$repos $ALPINE_MIRROR/$release/$repo"
done
doas sh -c "printf '%s\n' $repos > /etc/apk/repositories"
doas apk -U upgrade -a || apk fix || die "Failed to up/downgrade system"
abuild-keygen -ain
doas apk -U upgrade -a || doas apk fix || die "Failed to up/downgrade system"
gitlab_key_to_rsa $ABUILD_KEY rsa-private $HOME/.abuild/$ABUILD_KEY_NAME.rsa
gitlab_key_to_rsa $ABUILD_KEY_PUB rsa-public $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub
chmod 700 $HOME/.abuild/$ABUILD_KEY_NAME.rsa
echo "PACKAGER_PRIVKEY=$HOME/.abuild/$ABUILD_KEY_NAME.rsa" >> $HOME/.abuild/abuild.conf
doas cp $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub /etc/apk/keys/$ABUILD_KEY_NAME.rsa.pub
doas sed -i -E 's/export JOBS=[0-9]+$/export JOBS=$(nproc)/' /etc/abuild.conf
( . /etc/abuild.conf && echo "Building with $JOBS jobs" )
mkdir -p "$REPODEST"
git config --global init.defaultBranch master
}
setup_pmos() {
local release
case $BASEBRANCH in
v3.21) release="v24.12";;
v3.20) release="v24.6";;
v3.19) release="v23.12";;
edge) release=master;;
*) die "Branch \"$BASEBRANCH\" not supported!"
esac
doas wget "$PMOS_KEY" -P /etc/apk/keys
doas sh -c "echo $PMOS_MIRROR/$release >> /etc/apk/repositories"
doas apk update || true
}
sysinfo() {
printf ">>> Host system information (arch: %s, release: %s) <<<\n" "$ARCH" "$(get_release)"
printf "- Number of Cores: %s\n" "$(nproc)"
@ -162,7 +147,6 @@ sysinfo() {
printf "- Free space: %s\n" "$(df -hP / | awk '/\/$/ {print $4}')"
}
copy_artifacts() {
cd "$APORTSDIR"
@ -199,9 +183,9 @@ sysinfo || true
setup_system || die "Failed to setup system"
# git no longer allows to execute in repositories owned by different users
doas chown -R buildozer: .
doas chown -R $USER: .
fetch_flags="-qnu"
fetch_flags="-qn"
debugging && fetch_flags="-v"
git fetch $fetch_flags "$CI_MERGE_REQUEST_PROJECT_URL" \
@ -222,8 +206,8 @@ build_start=$CI_ALPINE_BUILD_OFFSET
build_limit=$CI_ALPINE_BUILD_LIMIT
for repo in $(changed_repos); do
mkdir -p "$APORTSDIR"/logs "$APORTSDIR"/packages "$APORTSDIR"/keys
set_repositories_for "$repo"
[ "$repo" == "pmos" ] && setup_pmos
built_aports=0
changed_aports_in_repo=$(changed_aports "$repo")
changed_aports_in_repo_count=$(echo "$changed_aports_in_repo" | wc -l)
@ -277,4 +261,3 @@ if [ "$failed" = true ]; then
elif [ -z "$aport_ok" ]; then
msg "No packages found to be built." yellow
fi

.gitlab/bin/changed-aports Executable file

@ -0,0 +1,20 @@
#!/bin/sh
if [ $# -lt 1 ]; then
echo "Usage: $0 <basebranch>"
exit 1
fi
if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
echo "Fatal: not inside a git repository"
exit 2
fi
basebranch=$1
if ! git rev-parse --verify --quiet $basebranch >/dev/null; then
# The base branch does not exist, probably due to a shallow clone
git fetch -v $CI_MERGE_REQUEST_PROJECT_URL.git +refs/heads/$basebranch:refs/heads/$basebranch
fi
git --no-pager diff --diff-filter=ACMR --name-only $basebranch...HEAD -- "*/APKBUILD" | xargs -r -n1 dirname

.gitlab/bin/functions.sh Executable file

@ -0,0 +1,74 @@
# shellcheck disable=SC3043
:
# shellcheck disable=SC3040
set -eu -o pipefail
changed_repos() {
: "${APORTSDIR?APORTSDIR missing}"
: "${BASEBRANCH?BASEBRANCH missing}"
cd "$APORTSDIR"
for repo in $REPOS; do
git diff --diff-filter=ACMR --exit-code "$BASEBRANCH"...HEAD -- "$repo" >/dev/null \
|| echo "$repo"
done
}
changed_aports() {
: "${APORTSDIR?APORTSDIR missing}"
: "${BASEBRANCH?BASEBRANCH missing}"
cd "$APORTSDIR"
local repo="$1"
local aports
aports=$(git diff --name-only --diff-filter=ACMR --relative="$repo" \
"$BASEBRANCH"...HEAD -- "*/APKBUILD" | xargs -rn1 dirname)
# shellcheck disable=2086
ap builddirs -d "$APORTSDIR/$repo" $aports 2>/dev/null | xargs -rn1 basename
}
section_start() {
name=${1?arg 1 name missing}
header=${2?arg 2 header missing}
collapsed=${3:-}
timestamp=$(date +%s)
options=""
case $collapsed in
yes|on|collapsed|true) options="[collapsed=true]";;
esac
printf "\e[0Ksection_start:%d:%s%s\r\e[0K%s\n" "$timestamp" "$name" "$options" "$header"
}
section_end() {
name=$1
timestamp=$(date +%s)
printf "\e[0Ksection_end:%d:%s\r\e[0K" "$timestamp" "$name"
}
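# gitlab_key_to_rsa <key material> <rsa-public|rsa-private> <target file>
# wraps a key stored in a CI variable into PEM armor, e.g. (as called from build.sh and push.sh):
#   gitlab_key_to_rsa $ABUILD_KEY rsa-private $HOME/.abuild/$ABUILD_KEY_NAME.rsa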
gitlab_key_to_rsa() {
KEY=$1
TYPE=$2
TGT=$3
TGT_DIR=${TGT%/*}
if [ "$TGT" == "$TGT_DIR" ]; then
TGT_DIR="./"
fi
if [ ! -d "$TGT_DIR" ]; then
mkdir -p "$TGT_DIR"
fi
case $TYPE in
rsa-public) local type="PUBLIC";;
rsa-private) local type="RSA PRIVATE";;
esac
echo "-----BEGIN $type KEY-----" > "$TGT"
echo $1 | sed 's/.\{64\}/&\
/g' >> "$TGT"
echo "-----END $type KEY-----" >> "$TGT"
}

.gitlab/bin/lint Executable file

@ -0,0 +1,96 @@
#!/bin/sh
BLUE="\e[34m"
MAGENTA="\e[35m"
RESET="\e[0m"
readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME
verbose() {
echo "> " "$@"
# shellcheck disable=SC2068
$@
}
debugging() {
[ -n "$CI_DEBUG_BUILD" ]
}
debug() {
if debugging; then
verbose "$@"
fi
}
# git no longer allows to execute in repositories owned by different users
doas chown -R gitlab-runner: .
fetch_flags="-qn"
debugging && fetch_flags="-v"
git fetch $fetch_flags "$CI_MERGE_REQUEST_PROJECT_URL" \
"+refs/heads/$BASEBRANCH:refs/heads/$BASEBRANCH"
if debugging; then
merge_base=$(git merge-base "$BASEBRANCH" HEAD)
echo "$merge_base"
git --version
git config -l
git tag merge-base "$merge_base" || { echo "Could not determine merge-base"; exit 50; }
git log --oneline --graph --decorate --all
fi
has_problems=0
for PKG in $(changed-aports "$BASEBRANCH"); do
printf "$BLUE==>$RESET Linting $PKG\n"
(
cd "$PKG"
repo=$(basename $(dirname $PKG));
if [ "$repo" = "main" ]; then
export SKIP_AL1=1
export SKIP_AL13=1
fi
printf "\n\n"
printf "$BLUE"
printf '======================================================\n'
printf " parse APKBUILD:\n"
printf '======================================================'
printf "$RESET\n\n"
( . ./APKBUILD ) || has_problems=1
printf "\n\n"
printf "$BLUE"
printf '======================================================\n'
printf " abuild sanitycheck:\n"
printf '======================================================'
printf "$RESET\n\n"
abuild sanitycheck || has_problems=1
printf "\n\n"
printf "$BLUE"
printf '======================================================\n'
printf " apkbuild-shellcheck:\n"
printf '======================================================'
printf "$RESET\n"
apkbuild-shellcheck || has_problems=1
printf "\n\n"
printf "$BLUE"
printf '======================================================\n'
printf " apkbuild-lint:\n"
printf '======================================================'
printf "$RESET\n\n"
apkbuild-lint APKBUILD || has_problems=1
return $has_problems
) || has_problems=1
echo
done
exit $has_problems

.gitlab/bin/push.sh Executable file

@ -0,0 +1,47 @@
#!/bin/sh
# shellcheck disable=SC3043
. $CI_PROJECT_DIR/.gitlab/bin/functions.sh
# shellcheck disable=SC3040
set -eu -o pipefail
readonly APORTSDIR=$CI_PROJECT_DIR
readonly REPOS="backports user"
readonly BASEBRANCH=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME
export GIT_SSH_COMMAND="ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
gitlab_key_to_rsa $ABUILD_KEY rsa-private $HOME/.abuild/$ABUILD_KEY_NAME.rsa
gitlab_key_to_rsa $ABUILD_KEY_PUB rsa-public $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub
gitlab_key_to_rsa $SSH_KEY rsa-private $HOME/.ssh/id_rsa
chmod 700 "$HOME"/.ssh/id_rsa
chmod 700 "$HOME"/.abuild/$ABUILD_KEY_NAME.rsa
echo "PACKAGER_PRIVKEY=$HOME/.abuild/$ABUILD_KEY_NAME.rsa" >> $HOME/.abuild/abuild.conf
echo "REPODEST=$CI_PROJECT_DIR/repo-apk" >> $HOME/.abuild/abuild.conf
doas cp $HOME/.abuild/$ABUILD_KEY_NAME.rsa.pub /etc/apk/keys/.
git clone git@lab.ilot.io:ayakael/repo-apk -b $BASEBRANCH
for i in $(find packages -type f -name "*.apk"); do
cp $i ${i/packages/repo-apk}
done
fetch_flags="-qn"
git fetch $fetch_flags "$CI_MERGE_REQUEST_PROJECT_URL" \
"+refs/heads/$BASEBRANCH:refs/heads/$BASEBRANCH"
for repo in $(changed_repos); do
rm $CI_PROJECT_DIR/repo-apk/$repo/x86_64/APKINDEX.tar.gz
mkdir -p $repo/DUMMY
echo "pkgname=DUMMY" > $repo/DUMMY/APKBUILD
cd $repo/DUMMY
abuild index
cd "$CI_PROJECT_DIR"
rm -R $repo/DUMMY
done
git -C repo-apk add .
git -C repo-apk commit -m "Update from $CI_MERGE_REQUEST_IID - $CI_MERGE_REQUEST_TITLE"
git -C repo-apk push


@ -1,63 +1,5 @@
# ayaports
Upstream: https://ayakael.net/forge/ayaports
Repository with various custom APKBUILDs.
## Description
Use at your own risk. While they will likely work, as I built them for my own use, I cannot guarantee that they'll stay up to date or that they won't burn your house down.
This repository contains aports that are not yet merged in the official Alpine
Linux repository or don't adhere to Alpine policies. Packages are automatically
built using CI. Once built, they are deployed to a Forgejo repository, making
them available to apk.
Branches are matched to Alpine releases.
## Repositories
You can browse all the repositories at https://ayakael.net/forge/ayaports
Affixed to each repository description is the appropriate link for use in
`/etc/apk/repositories`.
#### Backports
```
https://ayakael.net/api/packages/forge/alpine/v3.21/backports
```
Aports from the official Alpine repositories backported from edge. This is only
available and kept up-to-date on the latest stable release.
#### User
```
https://ayakael.net/api/packages/forge/alpine/edge/user
```
Aports that have yet to be (or may never be) upstreamed to the official
aports.
## How to use
Add the signing key of the apk repository to /etc/apk/keys:
```shell
cd /etc/apk/keys
curl -JO https://ayakael.net/api/packages/forge/alpine/key
```
Add repositories that you want to use (see above) to `/etc/apk/repositories`.
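For example, to enable the user repository on edge (illustrative; pick whichever
of the URLs listed above matches your release), add a line such as:
```
https://ayakael.net/api/packages/forge/alpine/edge/user
```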
## Support
Generally, only the latest branch is kept up-to-date. That said, if an aport
is broken on the latest release due to a dependency incompatibility, it will be
kept up-to-date on the release it last works on.
As these aports are built for my own application, I make no guarantees that
they will work for you.
## Contribution & bug reports
If you wish to contribute to this aports collection, or wish to report a bug,
you can do so on Codeberg here:
https://codeberg.org/ayakael/ayaports/issues
For packages that are in backports, bug reports and merge requests
should be done on Alpine's aports repo instance:
https://gitlab.alpinelinux.org/alpine/aports
## License
This README, the APKBUILDs, and the support scripts are licensed under the MIT License.
One of these days I'll PR them to Alpine's package repository.


@ -0,0 +1,46 @@
# Contributor: Kay Thomas <kaythomas@pm.me>
# Maintainer: Kay Thomas <kaythomas@pm.me>
pkgname=airsonic-advanced
_sha=89e6fe9aec80daf8224c6696f8f86bfcf22c6e0a
pkgver=11.0.0_git20220624
pkgrel=1
pkgdesc="Modern implementation of the Airsonic fork with several key performance and feature enhancements"
url="https://github.com/airsonic-advanced/airsonic-advanced"
# inconsistent test and build failures on other arches
arch="x86_64"
license="GPL-3.0-or-later"
depends="openjdk11"
makedepends="maven"
subpackages="$pkgname-openrc"
pkgusers="airsonic-advanced"
pkggroups="airsonic-advanced"
install="$pkgname.pre-install"
source="$pkgname-$pkgver.tar.gz::https://github.com/airsonic-advanced/airsonic-advanced/archive/$_sha.tar.gz
airsonic-advanced.initd
"
builddir="$srcdir/$pkgname-$_sha"
build() {
mvn clean package -DskipTests
}
check() {
mvn test
}
package() {
install -dm755 -o airsonic-advanced -g airsonic-advanced \
"$pkgdir"/var/airsonic
install -m755 -o airsonic-advanced -g airsonic-advanced \
"$builddir"/airsonic-main/target/airsonic.war \
"$pkgdir"/var/airsonic/airsonic.war
install -Dm755 "$srcdir"/$pkgname.initd \
"$pkgdir"/etc/init.d/$pkgname
}
sha512sums="
b794e6e6264613423fc2c7dec383d9a14a74a45a13efe7347c02b51a46e8debdd92fae35a6711540f04fa624ded77b9f377d54c3468c8c419380f1b133d4e3cd airsonic-advanced-11.0.0_git20220624.tar.gz
ca87e6a7199950e6ac52aeb076a03f831d60ee9d4ceed47366bbd78443765d205796d895ebb244051d8033e5b2e9ccd648d20434039c854b8b50e766cc5cd10d airsonic-advanced.initd
"


@ -0,0 +1,14 @@
#!/sbin/openrc-run
supervisor=supervise-daemon
name="airsonic-advanced"
command="/usr/lib/jvm/java-11-openjdk/jre/bin/java"
command_args="-jar airsonic.war"
command_user="airsonic-advanced:airsonic-advanced"
directory="/var/airsonic"
pidfile="/run/airsonic-advanced.pid"
depend() {
need net localmount
after firewall
}


@ -0,0 +1,6 @@
#!/bin/sh
addgroup -S airsonic-advanced 2>/dev/null
adduser -S -D -H -s /sbin/nologin -G airsonic-advanced -g airsonic-advanced airsonic-advanced 2>/dev/null
exit 0


@ -1,25 +0,0 @@
diff --color -Nur calibre-6.17.0.orig/src/calibre/gui2/update.py calibre-6.17.0/src/calibre/gui2/update.py
--- calibre-6.17.0.orig/src/calibre/gui2/update.py 2023-05-06 11:36:35.678461036 -0700
+++ calibre-6.17.0/src/calibre/gui2/update.py 2023-05-06 11:39:10.365134930 -0700
@@ -82,20 +82,6 @@
while not self.shutdown_event.is_set():
calibre_update_version = NO_CALIBRE_UPDATE
plugins_update_found = 0
- try:
- version = get_newest_version()
- if version[:2] > numeric_version[:2]:
- calibre_update_version = version
- except Exception as e:
- prints('Failed to check for calibre update:', as_unicode(e))
- try:
- update_plugins = get_plugin_updates_available(raise_error=True)
- if update_plugins is not None:
- plugins_update_found = len(update_plugins)
- except Exception as e:
- prints('Failed to check for plugin update:', as_unicode(e))
- if calibre_update_version != NO_CALIBRE_UPDATE or plugins_update_found > 0:
- self.signal.update_found.emit(calibre_update_version, plugins_update_found)
self.shutdown_event.wait(self.INTERVAL)
def shutdown(self):


@ -1,119 +0,0 @@
# Maintainer: Cowington Post <cowingtonpost@gmail.com>
pkgname=calibre
pkgver=8.4.0
pkgrel=0
pkgdesc="Ebook management application"
# qt6-webengine
arch="x86_64 aarch64"
url="https://calibre-ebook.com"
license="GPL-3.0-or-later"
depends="
font-liberation
libwmf
mtdev
optipng
poppler
py3-apsw
py3-beautifulsoup4
py3-css-parser
py3-cssselect
py3-dateutil
py3-dnspython
py3-feedparser
py3-fonttools
py3-html2text
py3-html5-parser
py3-html5lib
py3-jeepney
py3-lxml
py3-markdown
py3-mechanize
py3-msgpack
py3-netifaces
py3-pillow
py3-psutil
py3-pycryptodome
py3-pygments
py3-pyqt6-webengine
py3-regex
py3-xxhash
py3-zeroconf
qt6-qtimageformats
qt6-qtsvg
qt6-qtwebengine
qt6-qtbase-private-dev
udisks2
"
makedepends="
cmake
curl
hunspell-dev
hyphen-dev
libmtp-dev
libstemmer-dev
libusb-dev
podofo-dev
py3-pyqt-builder
py3-pyqt6-sip
py3-sip
python3-dev
qt6-qtbase-dev
uchardet-dev
xdg-utils
ffmpeg-dev
"
subpackages="
$pkgname-pyc
$pkgname-doc
$pkgname-bash-completion
$pkgname-zsh-completion
"
source="https://download.calibre-ebook.com/$pkgver/calibre-$pkgver.tar.xz
0001-$pkgname-no-update.patch
musl-pread.patch
"
# net: downloads iso-codes
# !check: no tests ran
options="net !check"
export LANG="en_US.UTF-8"
prepare() {
default_prepare
rm -f resources/calibre-portable.*
}
build() {
python3 setup.py build
python3 setup.py iso639
python3 setup.py iso3166
python3 setup.py liberation_fonts --system-liberation_fonts --path-to-liberation_fonts /usr/share/fonts/liberation
python3 setup.py mathjax
python3 setup.py gui
}
check() {
python3 -m unittest discover
}
package() {
# needed for zsh
mkdir -p "$pkgdir"/usr/share/zsh/site-functions
python3 setup.py install \
--staging-root="$pkgdir"/usr \
--system-plugins-location=/usr/share/calibre/system-plugins
cp -a man-pages/ "$pkgdir"/usr/share/man
rm -r "$pkgdir"/usr/share/calibre/rapydscript/
python3 -m compileall -fq "$pkgdir"/usr
}
sha512sums="
df998fa31b9e581739872a649669fccf29f34d3ac1b4d0a96c37e08a0b049b1357b56a2af25f2733936e78901dd61b38a24e536e107e7094ada7e60a5c2c56ab calibre-8.4.0.tar.xz
eb8e7ce40ff8b8daf6e7e55a5dff8ec4dff06c45744266bb48b3194e92ab1196bc91468203e3c2ca1e5144166a7d6be90e6cf0253513e761b56a4c85be4c2c76 0001-calibre-no-update.patch
d27d29c434a3d2df1b18125225a4d3762bf6fdba77385b377b18a7f325f29ae0b698974a39263f4f7aed8a368c87d0dc4446f488505b8e38664f8e9ee5b9bd12 musl-pread.patch
"


@ -1,11 +0,0 @@
--- a/src/calibre/utils/speedup.c
+++ b/src/calibre/utils/speedup.c
@@ -748,7 +748,7 @@
break;
}
#else
-#ifdef __linux__
+#ifdef __GLIBC__
ssize_t nr = pread64(fd, buf + pos, n - pos, offset);
#else
ssize_t nr = pread(fd, buf + pos, n - pos, offset);


@ -1,2 +0,0 @@
#!/bin/sh
/usr/bin/electron "/usr/lib/caprine"

backports/celery/APKBUILD Normal file

@ -0,0 +1,48 @@
# Maintainer: Drew DeVault <sir@cmpwn.com>
pkgname=celery
pkgver=5.2.7
pkgrel=0
pkgdesc="An asynchronous task queue/job queue based on distributed message passing"
url="http://www.celeryproject.org/"
arch="noarch !s390x" # lmited by py3-kombu
license="Apache-2.0"
depends="py3-urllib3 py3-redis py3-vine py3-kombu py3-billiard py3-tz"
makedepends="python3-dev py3-setuptools"
install="$pkgname.pre-install"
source="https://files.pythonhosted.org/packages/source/c/celery/celery-$pkgver.tar.gz
celery.confd
celery.initd"
pkgusers="celery"
pkggroups="celery"
subpackages="$pkgname-openrc"
provides="py3-celery=$pkgver-r$pkgrel"
# TODO: requires many many many dependencies
options="!check"
build() {
python3 setup.py build
}
package() {
install -dm755 "$pkgdir/$confdir"
install -dm755 "$pkgdir/$confdir/conf.d"
# install scripts
install -m755 -D "$srcdir"/$pkgname.initd \
"$pkgdir"/etc/init.d/$pkgname
install -m644 -D "$srcdir"/$pkgname.confd \
"$pkgdir"/etc/conf.d/$pkgname
python3 setup.py install --prefix=/usr --root="$pkgdir"
}
check() {
python3 setup.py test
}
sha512sums="
68e3bb082f97ebe20391293cc8fa96c41c8f5ac5e8c24b2b7bd66eb104ec459bdfa49741e47486676e5daa88d7a71e3eb0d9432851aeafc74b0d4352e567e853 celery-5.2.7.tar.gz
f9458bce0d8990de646df564bec96baa0e45867f44e41380d38520905e00c941b1ce261314bb78edaa14c591e0aa9386d24c58a61f69fb0fecc616c34a24dea1 celery.confd
ff8c0451efa7157fd61f2335f4187bef6cbdd51856c7cfad4de02244c6c5ca7c584f9108731b52f020fee866365d9f092ded266c90d13cb34e92a7ffb63fed57 celery.initd
"


@ -0,0 +1,4 @@
CELERY_USER=celery
CELERY_LOG=/var/log/celery.log
CELERY_REDIR="1>/dev/null 2>> ${CELERY_LOG}"
CELERY_OPTS="-A celeryapp -B --loglevel=info $CELERY_REDIR"


@ -0,0 +1,16 @@
#!/sbin/openrc-run
supervisor=supervise-daemon
description="celery queue worker"
: ${CELERY_USER:="celery"}
: ${CELERY_GROUP:="$(id -gn $CELERY_USER)"}
pidfile="/run/$RC_SVCNAME.sd.pid"
supervise_daemon_args="-u $CELERY_USER -g $CELERY_GROUP -p $pidfile"
command=/usr/bin/celery
command_args="${CELERY_OPTS}"
depend() {
use net
}


@ -0,0 +1,7 @@
#!/bin/sh
addgroup -S celery 2>/dev/null
adduser -S -D -H -h /usr/share/celery -s /sbin/nologin -G celery -g celery celery 2>/dev/null
exit 0


@ -1,35 +0,0 @@
# Contributor: lauren n. liberda <lauren@selfisekai.rocks>
# Maintainer: lauren n. liberda <lauren@selfisekai.rocks>
pkgname=electron-tasje
pkgver=0.7.3
pkgrel=0
pkgdesc="Tiny replacement for electron-builder"
url="https://codeberg.org/selfisekai/electron_tasje/"
arch="aarch64 x86_64" # only useful on platforms with electron
license="Apache-2.0 OR MIT"
makedepends="cargo cargo-auditable"
source="electron_tasje-${_rev:-"$pkgver"}.tar.gz::https://codeberg.org/selfisekai/electron_tasje/archive/${_rev:-"v$pkgver"}.tar.gz"
builddir="$srcdir/electron_tasje"
prepare() {
default_prepare
cargo fetch --target="$CTARGET" --locked
}
build() {
cargo auditable build --frozen --release
}
check() {
cargo test
}
package() {
install -Dm755 target/release/tasje "$pkgdir"/usr/bin/tasje
}
sha512sums="
251b7eabe74acdb5c7394f9d4d735b736acf555352785a9896ddaeed37632b238e823e1bb639e1f5a44a50455957ec41e1a585a3b2a9919b5818bb40843bd877 electron_tasje-0.7.3.tar.gz
"


@ -1,730 +0,0 @@
From c854a92a215d0cf39c704bbadd3611e552073d5f Mon Sep 17 00:00:00 2001
From: Collin Baker <collinbaker@chromium.org>
Date: Fri, 4 Apr 2025 14:08:18 -0700
Subject: [PATCH] Reland "Use #[global_allocator] to provide Rust allocator
implementation"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This is a reland of commit cfa3beef52625e03ba6ce2b2ac98e1b89dde5cdb
Original was reverted due to a cronet gn2bp failure. The script
filtered out GN rules in //build/rust/std, but this caused an exception
when //build/rust/std:allocator was referenced later.
Moving the rules to //build/rust/allocator sidesteps the issue.
Original change's description:
> Use #[global_allocator] to provide Rust allocator implementation
>
> The allocator shim hack we have been using no longer works with
> upstream Rust. Replace it with a less-unsupported method: provide a
> #[global_allocator] (see https://github.com/rust-lang/rust/issues/123015),
> which still requires us to provide a few symbol definitions.
>
> Bug: 408221149, 407024458
> Change-Id: If1808ca24b12dc80ead35a25521313a3d2e148d5
>
> Cq-Include-Trybots: luci.chromium.try:android-rust-arm32-rel,android-rust-arm64-dbg,android-rust-arm64-rel,linux-rust-x64-dbg,linux-rust-x64-rel,mac-rust-x64-dbg,win-rust-x64-dbg,win-rust-x64-rel
> Change-Id: If1808ca24b12dc80ead35a25521313a3d2e148d5
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6427855
> Reviewed-by: Alan Zhao <ayzhao@google.com>
> Reviewed-by: Lei Zhang <thestig@chromium.org>
> Reviewed-by: Łukasz Anforowicz <lukasza@chromium.org>
> Commit-Queue: Collin Baker <collinbaker@chromium.org>
> Auto-Submit: Collin Baker <collinbaker@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#1442472}
Bug: 408221149, 407024458
Cq-Include-Trybots: luci.chromium.try:android-rust-arm32-rel,android-rust-arm64-dbg,android-rust-arm64-rel,linux-rust-x64-dbg,linux-rust-x64-rel,mac-rust-x64-dbg,win-rust-x64-dbg,win-rust-x64-rel
Change-Id: I36fef217297bfe64ae81519be24b8c653f6fdfa1
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6432410
Reviewed-by: Mohannad Farrag <aymanm@google.com>
Reviewed-by: Łukasz Anforowicz <lukasza@chromium.org>
Auto-Submit: Collin Baker <collinbaker@chromium.org>
Commit-Queue: Łukasz Anforowicz <lukasza@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1442922}
---
build/rust/allocator/BUILD.gn | 90 ++++++++++++++++
build/rust/{std => allocator}/alias.cc | 4 +-
build/rust/{std => allocator}/alias.h | 6 +-
.../allocator_impls.cc} | 100 ++++++++----------
build/rust/allocator/allocator_impls.h | 25 +++++
.../allocator/allocator_shim_definitions.cc | 30 ++++++
.../{std => allocator}/compiler_specific.h | 6 +-
.../rust/{std => allocator}/immediate_crash.h | 6 +-
build/rust/allocator/lib.rs | 48 +++++++++
build/rust/cargo_crate.gni | 9 ++
build/rust/rust_macro.gni | 3 +
build/rust/rust_target.gni | 4 +
build/rust/std/BUILD.gn | 41 -------
components/cronet/android/dependencies.txt | 1 +
third_party/breakpad/BUILD.gn | 10 +-
15 files changed, 272 insertions(+), 111 deletions(-)
create mode 100644 build/rust/allocator/BUILD.gn
rename build/rust/{std => allocator}/alias.cc (87%)
rename build/rust/{std => allocator}/alias.h (91%)
rename build/rust/{std/remap_alloc.cc => allocator/allocator_impls.cc} (67%)
create mode 100644 build/rust/allocator/allocator_impls.h
create mode 100644 build/rust/allocator/allocator_shim_definitions.cc
rename build/rust/{std => allocator}/compiler_specific.h (87%)
rename build/rust/{std => allocator}/immediate_crash.h (97%)
create mode 100644 build/rust/allocator/lib.rs
diff --git a/build/rust/allocator/BUILD.gn b/build/rust/allocator/BUILD.gn
new file mode 100644
index 0000000000000..06aa47f097c9c
--- /dev/null
+++ b/build/rust/allocator/BUILD.gn
@@ -0,0 +1,90 @@
+# Copyright 2025 The Chromium Authors
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/buildflag_header.gni")
+import("//build/config/rust.gni")
+import("//build/rust/rust_static_library.gni")
+
+rust_allocator_uses_partition_alloc = false
+if (build_with_chromium) {
+ import("//base/allocator/partition_allocator/partition_alloc.gni")
+ rust_allocator_uses_partition_alloc = use_partition_alloc_as_malloc
+}
+
+buildflag_header("buildflags") {
+ header = "buildflags.h"
+ flags = [
+ "RUST_ALLOCATOR_USES_PARTITION_ALLOC=$rust_allocator_uses_partition_alloc",
+ ]
+ visibility = [ ":*" ]
+}
+
+if (toolchain_has_rust) {
+ # All targets which depend on Rust code but are not linked by rustc must
+ # depend on this. Usually, this dependency will come from the rust_target() GN
+ # template. However, cargo_crate() does *not* include this dependency so any
+ # C++ targets which directly depend on a cargo_crate() must depend on this.
+ rust_static_library("allocator") {
+ sources = [ "lib.rs" ]
+ crate_root = "lib.rs"
+ cxx_bindings = [ "lib.rs" ]
+
+ deps = [
+ ":allocator_impls",
+ ":allocator_shim_definitions",
+ ]
+
+ no_chromium_prelude = true
+ no_allocator_crate = true
+ allow_unsafe = true
+ }
+
+ static_library("allocator_impls") {
+ public_deps = []
+ if (rust_allocator_uses_partition_alloc) {
+ public_deps += [ "//base/allocator/partition_allocator:partition_alloc" ]
+ }
+
+ sources = [
+ "allocator_impls.cc",
+ "allocator_impls.h",
+ ]
+
+ deps = [
+ ":allocator_cpp_shared",
+ ":buildflags",
+
+ # TODO(crbug.com/408221149): remove the C++ -> Rust dependency for the
+ # default allocator.
+ "//build/rust/std",
+ ]
+
+ visibility = [ ":*" ]
+ }
+
+ source_set("allocator_shim_definitions") {
+ sources = [ "allocator_shim_definitions.cc" ]
+
+ deps = [ ":allocator_cpp_shared" ]
+
+ visibility = [ ":*" ]
+ }
+
+ source_set("allocator_cpp_shared") {
+ sources = [
+ # `alias.*`, `compiler_specific.h`, and `immediate_crash.*` have been
+ # copied from `//base`.
+ # TODO(crbug.com/40279749): Avoid duplication / reuse code.
+ "alias.cc",
+ "alias.h",
+ "compiler_specific.h",
+ "immediate_crash.h",
+ ]
+
+ visibility = [
+ ":allocator_impls",
+ ":allocator_shim_definitions",
+ ]
+ }
+}
diff --git a/build/rust/std/alias.cc b/build/rust/allocator/alias.cc
similarity index 87%
rename from build/rust/std/alias.cc
rename to build/rust/allocator/alias.cc
index 42febac3ed1fc..ca20986f8ed49 100644
--- a/build/rust/std/alias.cc
+++ b/build/rust/allocator/alias.cc
@@ -7,9 +7,9 @@
//
// TODO(crbug.com/40279749): Avoid code duplication / reuse code.
-#include "build/rust/std/alias.h"
+#include "build/rust/allocator/alias.h"
-#include "build/rust/std/compiler_specific.h"
+#include "build/rust/allocator/compiler_specific.h"
namespace build_rust_std {
namespace debug {
diff --git a/build/rust/std/alias.h b/build/rust/allocator/alias.h
similarity index 91%
rename from build/rust/std/alias.h
rename to build/rust/allocator/alias.h
index 0eaba6766148f..80995ecfb045e 100644
--- a/build/rust/std/alias.h
+++ b/build/rust/allocator/alias.h
@@ -8,8 +8,8 @@
//
// TODO(crbug.com/40279749): Avoid code duplication / reuse code.
-#ifndef BUILD_RUST_STD_ALIAS_H_
-#define BUILD_RUST_STD_ALIAS_H_
+#ifndef BUILD_RUST_ALLOCATOR_ALIAS_H_
+#define BUILD_RUST_ALLOCATOR_ALIAS_H_
#include <stddef.h>
@@ -34,4 +34,4 @@ void Alias(const void* var);
const int line_number = __LINE__; \
build_rust_std::debug::Alias(&line_number)
-#endif // BUILD_RUST_STD_ALIAS_H_
+#endif // BUILD_RUST_ALLOCATOR_ALIAS_H_
diff --git a/build/rust/std/remap_alloc.cc b/build/rust/allocator/allocator_impls.cc
similarity index 67%
rename from build/rust/std/remap_alloc.cc
rename to build/rust/allocator/allocator_impls.cc
index a443b11ec513d..1fde98f23cd12 100644
--- a/build/rust/std/remap_alloc.cc
+++ b/build/rust/allocator/allocator_impls.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "build/rust/allocator/allocator_impls.h"
+
#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/390223051): Remove C-library calls to fix the errors.
#pragma allow_unsafe_libc_calls
@@ -11,9 +13,9 @@
#include <cstring>
#include "build/build_config.h"
-#include "build/rust/std/alias.h"
-#include "build/rust/std/buildflags.h"
-#include "build/rust/std/immediate_crash.h"
+#include "build/rust/allocator/alias.h"
+#include "build/rust/allocator/buildflags.h"
+#include "build/rust/allocator/immediate_crash.h"
#if BUILDFLAG(RUST_ALLOCATOR_USES_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_constants.h" // nogncheck
@@ -22,6 +24,11 @@
#include <cstdlib>
#endif
+// NOTE: this documentation is outdated.
+//
+// TODO(crbug.com/408221149): update this documentation, or replace it with docs
+// in the Rust allocator implementation.
+//
// When linking a final binary, rustc has to pick between either:
// * The default Rust allocator
// * Any #[global_allocator] defined in *any rlib in its dependency tree*
@@ -87,19 +94,6 @@
// enabling it breaks Win32 APIs like CreateProcess:
// https://issues.chromium.org/u/1/issues/368070343#comment29
-extern "C" {
-
-#ifdef COMPONENT_BUILD
-#if BUILDFLAG(IS_WIN)
-#define REMAP_ALLOC_ATTRIBUTES __declspec(dllexport) __attribute__((weak))
-#else
-#define REMAP_ALLOC_ATTRIBUTES \
- __attribute__((visibility("default"))) __attribute__((weak))
-#endif
-#else
-#define REMAP_ALLOC_ATTRIBUTES __attribute__((weak))
-#endif // COMPONENT_BUILD
-
#if !BUILDFLAG(RUST_ALLOCATOR_USES_PARTITION_ALLOC) && BUILDFLAG(IS_WIN) && \
defined(ADDRESS_SANITIZER)
#define USE_WIN_ALIGNED_MALLOC 1
@@ -107,17 +101,19 @@ extern "C" {
#define USE_WIN_ALIGNED_MALLOC 0
#endif
-// This must exist as the stdlib depends on it to prove that we know the
-// alloc shims below are unstable. In the future we may be required to replace
-// them with a #[global_allocator] crate (see file comment above for more).
-//
-// Marked as weak as when Rust drives linking it includes this symbol itself,
-// and we don't want a collision due to C++ being in the same link target, where
-// C++ causes us to explicitly link in the stdlib and this symbol here.
-[[maybe_unused]]
-__attribute__((weak)) unsigned char __rust_no_alloc_shim_is_unstable;
+// The default allocator functions provided by the Rust standard library.
+extern "C" void* __rdl_alloc(size_t size, size_t align);
+extern "C" void __rdl_dealloc(void* p, size_t size, size_t align);
+extern "C" void* __rdl_realloc(void* p,
+ size_t old_size,
+ size_t align,
+ size_t new_size);
+
+extern "C" void* __rdl_alloc_zeroed(size_t size, size_t align);
+
+namespace rust_allocator_internal {
-REMAP_ALLOC_ATTRIBUTES void* __rust_alloc(size_t size, size_t align) {
+unsigned char* alloc(size_t size, size_t align) {
#if BUILDFLAG(RUST_ALLOCATOR_USES_PARTITION_ALLOC)
// PartitionAlloc will crash if given an alignment larger than this.
if (align > partition_alloc::internal::kMaxSupportedAlignment) {
@@ -125,19 +121,19 @@ REMAP_ALLOC_ATTRIBUTES void* __rust_alloc(size_t size, size_t align) {
}
if (align <= alignof(std::max_align_t)) {
- return allocator_shim::UncheckedAlloc(size);
+ return static_cast<unsigned char*>(allocator_shim::UncheckedAlloc(size));
} else {
- return allocator_shim::UncheckedAlignedAlloc(size, align);
+ return static_cast<unsigned char*>(
+ allocator_shim::UncheckedAlignedAlloc(size, align));
}
#elif USE_WIN_ALIGNED_MALLOC
- return _aligned_malloc(size, align);
+ return static_cast<unsigned char*>(_aligned_malloc(size, align));
#else
- extern void* __rdl_alloc(size_t size, size_t align);
- return __rdl_alloc(size, align);
+ return static_cast<unsigned char*>(__rdl_alloc(size, align));
#endif
}
-REMAP_ALLOC_ATTRIBUTES void __rust_dealloc(void* p, size_t size, size_t align) {
+void dealloc(unsigned char* p, size_t size, size_t align) {
#if BUILDFLAG(RUST_ALLOCATOR_USES_PARTITION_ALLOC)
if (align <= alignof(std::max_align_t)) {
allocator_shim::UncheckedFree(p);
@@ -147,54 +143,44 @@ REMAP_ALLOC_ATTRIBUTES void __rust_dealloc(void* p, size_t size, size_t align) {
#elif USE_WIN_ALIGNED_MALLOC
return _aligned_free(p);
#else
- extern void __rdl_dealloc(void* p, size_t size, size_t align);
__rdl_dealloc(p, size, align);
#endif
}
-REMAP_ALLOC_ATTRIBUTES void* __rust_realloc(void* p,
- size_t old_size,
- size_t align,
- size_t new_size) {
+unsigned char* realloc(unsigned char* p,
+ size_t old_size,
+ size_t align,
+ size_t new_size) {
#if BUILDFLAG(RUST_ALLOCATOR_USES_PARTITION_ALLOC)
if (align <= alignof(std::max_align_t)) {
- return allocator_shim::UncheckedRealloc(p, new_size);
+ return static_cast<unsigned char*>(
+ allocator_shim::UncheckedRealloc(p, new_size));
} else {
- return allocator_shim::UncheckedAlignedRealloc(p, new_size, align);
+ return static_cast<unsigned char*>(
+ allocator_shim::UncheckedAlignedRealloc(p, new_size, align));
}
#elif USE_WIN_ALIGNED_MALLOC
- return _aligned_realloc(p, new_size, align);
+ return static_cast<unsigned char*>(_aligned_realloc(p, new_size, align));
#else
- extern void* __rdl_realloc(void* p, size_t old_size, size_t align,
- size_t new_size);
- return __rdl_realloc(p, old_size, align, new_size);
+ return static_cast<unsigned char*>(
+ __rdl_realloc(p, old_size, align, new_size));
#endif
}
-REMAP_ALLOC_ATTRIBUTES void* __rust_alloc_zeroed(size_t size, size_t align) {
+unsigned char* alloc_zeroed(size_t size, size_t align) {
#if BUILDFLAG(RUST_ALLOCATOR_USES_PARTITION_ALLOC) || USE_WIN_ALIGNED_MALLOC
// TODO(danakj): When RUST_ALLOCATOR_USES_PARTITION_ALLOC is true, it's
// possible that a partition_alloc::UncheckedAllocZeroed() call would perform
// better than partition_alloc::UncheckedAlloc() + memset. But there is no
// such API today. See b/342251590.
- void* p = __rust_alloc(size, align);
+ unsigned char* p = alloc(size, align);
if (p) {
memset(p, 0, size);
}
return p;
#else
- extern void* __rdl_alloc_zeroed(size_t size, size_t align);
- return __rdl_alloc_zeroed(size, align);
+ return static_cast<unsigned char*>(__rdl_alloc_zeroed(size, align));
#endif
}
-REMAP_ALLOC_ATTRIBUTES void __rust_alloc_error_handler(size_t size,
- size_t align) {
- NO_CODE_FOLDING();
- IMMEDIATE_CRASH();
-}
-
-REMAP_ALLOC_ATTRIBUTES extern const unsigned char
- __rust_alloc_error_handler_should_panic = 0;
-
-} // extern "C"
+} // namespace rust_allocator_internal
diff --git a/build/rust/allocator/allocator_impls.h b/build/rust/allocator/allocator_impls.h
new file mode 100644
index 0000000000000..afb335412faf9
--- /dev/null
+++ b/build/rust/allocator/allocator_impls.h
@@ -0,0 +1,25 @@
+// Copyright 2025 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BUILD_RUST_ALLOCATOR_ALLOCATOR_IMPLS_H_
+#define BUILD_RUST_ALLOCATOR_ALLOCATOR_IMPLS_H_
+
+#include <cstddef>
+
+#include "build/build_config.h"
+#include "build/rust/allocator/buildflags.h"
+
+namespace rust_allocator_internal {
+
+unsigned char* alloc(size_t size, size_t align);
+void dealloc(unsigned char* p, size_t size, size_t align);
+unsigned char* realloc(unsigned char* p,
+ size_t old_size,
+ size_t align,
+ size_t new_size);
+unsigned char* alloc_zeroed(size_t size, size_t align);
+
+} // namespace rust_allocator_internal
+
+#endif // BUILD_RUST_ALLOCATOR_ALLOCATOR_IMPLS_H_
diff --git a/build/rust/allocator/allocator_shim_definitions.cc b/build/rust/allocator/allocator_shim_definitions.cc
new file mode 100644
index 0000000000000..a4d1bd77b7016
--- /dev/null
+++ b/build/rust/allocator/allocator_shim_definitions.cc
@@ -0,0 +1,30 @@
+// Copyright 2025 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstddef>
+
+#include "build/rust/allocator/alias.h"
+#include "build/rust/allocator/immediate_crash.h"
+
+extern "C" {
+
+// As part of rustc's contract for using `#[global_allocator]` without
+// rustc-generated shims we must define this symbol, since we are opting in to
+// unstable functionality. See https://github.com/rust-lang/rust/issues/123015
+//
+// Mark it weak since rustc will generate it when it drives linking.
+[[maybe_unused]]
+__attribute__((weak)) unsigned char __rust_no_alloc_shim_is_unstable;
+
+__attribute__((weak)) void __rust_alloc_error_handler(size_t size,
+ size_t align) {
+ NO_CODE_FOLDING();
+ IMMEDIATE_CRASH();
+}
+
+__attribute__((
+ weak)) extern const unsigned char __rust_alloc_error_handler_should_panic =
+ 0;
+
+} // extern "C"
diff --git a/build/rust/std/compiler_specific.h b/build/rust/allocator/compiler_specific.h
similarity index 87%
rename from build/rust/std/compiler_specific.h
rename to build/rust/allocator/compiler_specific.h
index ea79a7a8dc284..f9079679a3e9a 100644
--- a/build/rust/std/compiler_specific.h
+++ b/build/rust/allocator/compiler_specific.h
@@ -7,8 +7,8 @@
//
// TODO(crbug.com/40279749): Avoid code duplication / reuse code.
-#ifndef BUILD_RUST_STD_COMPILER_SPECIFIC_H_
-#define BUILD_RUST_STD_COMPILER_SPECIFIC_H_
+#ifndef BUILD_RUST_ALLOCATOR_COMPILER_SPECIFIC_H_
+#define BUILD_RUST_ALLOCATOR_COMPILER_SPECIFIC_H_
#include "build/build_config.h"
@@ -35,4 +35,4 @@
#define NOINLINE
#endif
-#endif // BUILD_RUST_STD_COMPILER_SPECIFIC_H_
+#endif // BUILD_RUST_ALLOCATOR_COMPILER_SPECIFIC_H_
diff --git a/build/rust/std/immediate_crash.h b/build/rust/allocator/immediate_crash.h
similarity index 97%
rename from build/rust/std/immediate_crash.h
rename to build/rust/allocator/immediate_crash.h
index e4fd5a09d9379..9cbf9fd65f3e0 100644
--- a/build/rust/std/immediate_crash.h
+++ b/build/rust/allocator/immediate_crash.h
@@ -5,8 +5,8 @@
// This file has been copied from //base/immediate_crash.h.
// TODO(crbug.com/40279749): Avoid code duplication / reuse code.
-#ifndef BUILD_RUST_STD_IMMEDIATE_CRASH_H_
-#define BUILD_RUST_STD_IMMEDIATE_CRASH_H_
+#ifndef BUILD_RUST_ALLOCATOR_IMMEDIATE_CRASH_H_
+#define BUILD_RUST_ALLOCATOR_IMMEDIATE_CRASH_H_
#include "build/build_config.h"
@@ -168,4 +168,4 @@
#endif // defined(__clang__) || defined(COMPILER_GCC)
-#endif // BUILD_RUST_STD_IMMEDIATE_CRASH_H_
+#endif // BUILD_RUST_ALLOCATOR_IMMEDIATE_CRASH_H_
diff --git a/build/rust/allocator/lib.rs b/build/rust/allocator/lib.rs
new file mode 100644
index 0000000000000..7f4a0fc245694
--- /dev/null
+++ b/build/rust/allocator/lib.rs
@@ -0,0 +1,48 @@
+// Copyright 2025 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//! Define the allocator that Rust code in Chrome should use.
+//!
+//! Any final artifact that depends on this crate, even transitively, will use
+//! the allocator defined here. Currently this is a thin wrapper around
+//! allocator_impls.cc's functions; see the documentation there.
+
+use std::alloc::{GlobalAlloc, Layout};
+
+struct Allocator;
+
+unsafe impl GlobalAlloc for Allocator {
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ unsafe { ffi::alloc(layout.size(), layout.align()) }
+ }
+
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ unsafe {
+ ffi::dealloc(ptr, layout.size(), layout.align());
+ }
+ }
+
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ unsafe { ffi::alloc_zeroed(layout.size(), layout.align()) }
+ }
+
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ unsafe { ffi::realloc(ptr, layout.size(), layout.align(), new_size) }
+ }
+}
+
+#[global_allocator]
+static GLOBAL: Allocator = Allocator;
+
+#[cxx::bridge(namespace = "rust_allocator_internal")]
+mod ffi {
+ extern "C++" {
+ include!("build/rust/allocator/allocator_impls.h");
+
+ unsafe fn alloc(size: usize, align: usize) -> *mut u8;
+ unsafe fn dealloc(p: *mut u8, size: usize, align: usize);
+ unsafe fn realloc(p: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
+ unsafe fn alloc_zeroed(size: usize, align: usize) -> *mut u8;
+ }
+}
diff --git a/build/rust/cargo_crate.gni b/build/rust/cargo_crate.gni
index 6d11c538bf4d5..d9912722b4ecd 100644
--- a/build/rust/cargo_crate.gni
+++ b/build/rust/cargo_crate.gni
@@ -259,6 +259,12 @@ template("cargo_crate") {
# Don't import the `chromium` crate into third-party code.
no_chromium_prelude = true
+ # Don't depend on the chrome-specific #[global_allocator] crate from
+ # third-party code. This avoids some dependency cycle issues. The allocator
+ # crate will still be used if it exists anywhere in the dependency graph for
+ # a given linked artifact.
+ no_allocator_crate = true
+
rustc_metadata = _rustc_metadata
# TODO(crbug.com/40259764): don't default to true. This requires changes to
@@ -483,6 +489,9 @@ template("cargo_crate") {
# Don't import the `chromium` crate into third-party code.
no_chromium_prelude = true
+ # Build scripts do not need to link to chrome's allocator.
+ no_allocator_crate = true
+
# The ${_build_script_name}_output target looks for the exe in this
# location. Due to how the Windows component build works, this has to
# be $root_out_dir for all EXEs. In component build, C++ links to the
diff --git a/build/rust/rust_macro.gni b/build/rust/rust_macro.gni
index bcbb30ed44111..41d857632ccdc 100644
--- a/build/rust/rust_macro.gni
+++ b/build/rust/rust_macro.gni
@@ -16,6 +16,9 @@ template("rust_macro") {
forward_variables_from(invoker, TESTONLY_AND_VISIBILITY)
proc_macro_configs = invoker.configs
target_type = "rust_proc_macro"
+
+ # Macros are loaded by rustc and shouldn't use chrome's allocation routines.
+ no_allocator_crate = true
}
}
diff --git a/build/rust/rust_target.gni b/build/rust/rust_target.gni
index 1a2f96337d436..1003a7b678352 100644
--- a/build/rust/rust_target.gni
+++ b/build/rust/rust_target.gni
@@ -339,6 +339,10 @@ template("rust_target") {
_rust_deps += [ "//build/rust/std" ]
}
+ if (!defined(invoker.no_allocator_crate) || !invoker.no_allocator_crate) {
+ _rust_deps += [ "//build/rust/allocator" ]
+ }
+
if (_build_unit_tests) {
_unit_test_target = "${_target_name}_unittests"
if (defined(invoker.unit_test_target)) {
diff --git a/build/rust/std/BUILD.gn b/build/rust/std/BUILD.gn
index 6b996aa1fe386..25db126076b2f 100644
--- a/build/rust/std/BUILD.gn
+++ b/build/rust/std/BUILD.gn
@@ -15,51 +15,12 @@
# allocator functions to PartitionAlloc when `use_partition_alloc_as_malloc` is
# true, so that Rust and C++ use the same allocator backend.
-import("//build/buildflag_header.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/coverage/coverage.gni")
import("//build/config/rust.gni")
import("//build/config/sanitizers/sanitizers.gni")
-rust_allocator_uses_partition_alloc = false
-if (build_with_chromium) {
- import("//base/allocator/partition_allocator/partition_alloc.gni")
- rust_allocator_uses_partition_alloc = use_partition_alloc_as_malloc
-}
-
-buildflag_header("buildflags") {
- header = "buildflags.h"
- flags = [
- "RUST_ALLOCATOR_USES_PARTITION_ALLOC=$rust_allocator_uses_partition_alloc",
- ]
- visibility = [ ":*" ]
-}
-
if (toolchain_has_rust) {
- # If clang performs the link step, we need to provide the allocator symbols
- # that are normally injected by rustc during linking.
- #
- # We also "happen to" use this to redirect allocations to PartitionAlloc,
- # though that would be better done through a #[global_allocator] crate (see
- # above).
- source_set("remap_alloc") {
- public_deps = []
- if (rust_allocator_uses_partition_alloc) {
- public_deps += [ "//base/allocator/partition_allocator:partition_alloc" ]
- }
- deps = [ ":buildflags" ]
- sources = [
- # `alias.*`, `compiler_specific.h`, and `immediate_crash.*` have been
- # copied from `//base`.
- # TODO(crbug.com/40279749): Avoid duplication / reuse code.
- "alias.cc",
- "alias.h",
- "compiler_specific.h",
- "immediate_crash.h",
- "remap_alloc.cc",
- ]
- }
-
# List of Rust stdlib rlibs which are present in the official Rust toolchain
# we are using from the Android team. This is usually a version or two behind
# nightly. Generally this matches the toolchain we build ourselves, but if
@@ -269,8 +230,6 @@ if (toolchain_has_rust) {
foreach(libname, stdlib_files + skip_stdlib_files) {
deps += [ "rules:$libname" ]
}
-
- public_deps = [ ":remap_alloc" ]
}
} else {
action("find_stdlib") {
diff --git a/components/cronet/android/dependencies.txt b/components/cronet/android/dependencies.txt
index bf56bc45ed41f..c0e41ef7c6766 100644
--- a/components/cronet/android/dependencies.txt
+++ b/components/cronet/android/dependencies.txt
@@ -14,6 +14,7 @@
//build/config
//build/config/compiler
//build/rust
+//build/rust/allocator
//build/rust/chromium_prelude
//build/rust/std
//build/rust/std/rules
diff --git a/third_party/breakpad/BUILD.gn b/third_party/breakpad/BUILD.gn
index 007fdff16e92e..00da4fa484998 100644
--- a/third_party/breakpad/BUILD.gn
+++ b/third_party/breakpad/BUILD.gn
@@ -495,7 +495,10 @@ if (is_mac) {
defines = [ "HAVE_MACH_O_NLIST_H" ]
# Rust demangle support.
- deps = [ "//third_party/rust/rustc_demangle_capi/v0_1:lib" ]
+ deps = [
+ "//build/rust/allocator",
+ "//third_party/rust/rustc_demangle_capi/v0_1:lib",
+ ]
defines += [ "HAVE_RUSTC_DEMANGLE" ]
include_dirs += [ "//third_party/rust/chromium_crates_io/vendor/rustc-demangle-capi-0.1.0/include" ]
sources += [ "//third_party/rust/chromium_crates_io/vendor/rustc-demangle-capi-0.1.0/include/rustc_demangle.h" ]
@@ -743,7 +746,10 @@ if (is_linux || is_chromeos || is_android) {
include_dirs = [ "breakpad/src" ]
# Rust demangle support.
- deps = [ "//third_party/rust/rustc_demangle_capi/v0_1:lib" ]
+ deps = [
+ "//build/rust/allocator",
+ "//third_party/rust/rustc_demangle_capi/v0_1:lib",
+ ]
defines += [ "HAVE_RUSTC_DEMANGLE" ]
include_dirs += [ "//third_party/rust/chromium_crates_io/vendor/rustc-demangle-capi-0.1.0/include" ]
sources += [ "//third_party/rust/chromium_crates_io/vendor/rustc-demangle-capi-0.1.0/include/rustc_demangle.h" ]

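The approach introduced by the patch above — a dedicated crate whose #[global_allocator] forwards every allocation to C/C++ entry points — can be sketched in isolation. The following is a minimal, self-contained illustration under assumed names (ForwardingAllocator, with libc's malloc/free standing in for allocator_impls.cc); Chromium's real crate goes through the cxx bridge shown in lib.rs instead.

// Illustrative sketch only (assumed names, not Chromium's real shim): a
// #[global_allocator] whose methods forward to C functions, the same shape
// as the Allocator type added in build/rust/allocator/lib.rs above.
use std::alloc::{GlobalAlloc, Layout};
use std::os::raw::c_void;

extern "C" {
    fn malloc(size: usize) -> *mut c_void;
    fn free(p: *mut c_void);
}

struct ForwardingAllocator;

unsafe impl GlobalAlloc for ForwardingAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // malloc only guarantees "fundamental" alignment (16 bytes on the
        // usual 64-bit targets); refuse larger requests rather than hand out
        // misaligned memory. Chromium's shim routes those to aligned APIs.
        if layout.align() > 16 {
            return std::ptr::null_mut();
        }
        malloc(layout.size()) as *mut u8
    }

    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
        free(ptr as *mut c_void);
    }
}

#[global_allocator]
static GLOBAL: ForwardingAllocator = ForwardingAllocator;

fn main() {
    // Every heap allocation in this binary now goes through the forwarder.
    let v: Vec<u32> = (0..16).collect();
    println!("sum = {}", v.iter().sum::<u32>());
}
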
View file

@ -1,319 +0,0 @@
From 5032162442c5f2f3093cd7646f3a06f826d7f7a8 Mon Sep 17 00:00:00 2001
From: Collin Baker <collinbaker@chromium.org>
Date: Mon, 7 Apr 2025 12:48:17 -0700
Subject: [PATCH] Call Rust default allocator directly from Rust
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The Chromium `#[global_allocator]` crate forwarded calls to the C++
implementation, which in turn called into the Rust standard library
implementations in some build configurations.
This Rust -> C++ -> Rust round trip is unnecessary, and the references
to these symbols are blocking a toolchain update: upstream, these
symbol names are now mangled.
Instead, use Rust conditional compilation to choose between the
Chromium and the libstd-provided allocators.
Additionally, the remaining internal symbols defined in C++ are moved
to Rust.
Bug: 408221149, 407024458
Change-Id: I78f8c90d51a36a73099aa7d333091d7b8aded3c0
Cq-Include-Trybots: luci.chromium.try:android-rust-arm32-rel,android-rust-arm64-dbg,android-rust-arm64-rel,linux-rust-x64-dbg,linux-rust-x64-rel,mac-rust-x64-dbg,win-rust-x64-dbg,win-rust-x64-rel
Change-Id: I78f8c90d51a36a73099aa7d333091d7b8aded3c0
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6434355
Reviewed-by: Łukasz Anforowicz <lukasza@chromium.org>
Commit-Queue: Collin Baker <collinbaker@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1443703}
---
build/rust/allocator/BUILD.gn | 54 +++++++------------
build/rust/allocator/allocator_impls.cc | 28 +++++-----
build/rust/allocator/allocator_impls.h | 2 +
.../allocator/allocator_shim_definitions.cc | 30 -----------
build/rust/allocator/lib.rs | 38 +++++++++++++
5 files changed, 73 insertions(+), 79 deletions(-)
delete mode 100644 build/rust/allocator/allocator_shim_definitions.cc
diff --git a/build/rust/allocator/BUILD.gn b/build/rust/allocator/BUILD.gn
index 06aa47f097c9c..f09314afc8158 100644
--- a/build/rust/allocator/BUILD.gn
+++ b/build/rust/allocator/BUILD.gn
@@ -12,6 +12,9 @@ if (build_with_chromium) {
rust_allocator_uses_partition_alloc = use_partition_alloc_as_malloc
}
+use_cpp_allocator_impls =
+ rust_allocator_uses_partition_alloc || (is_win && is_asan)
+
buildflag_header("buildflags") {
header = "buildflags.h"
flags = [
@@ -30,61 +33,44 @@ if (toolchain_has_rust) {
crate_root = "lib.rs"
cxx_bindings = [ "lib.rs" ]
- deps = [
- ":allocator_impls",
- ":allocator_shim_definitions",
- ]
+ deps = [ ":allocator_impls" ]
no_chromium_prelude = true
no_allocator_crate = true
allow_unsafe = true
+
+ if (use_cpp_allocator_impls) {
+ rustflags = [
+ "--cfg",
+ "use_cpp_allocator_impls",
+ ]
+ }
+
+ configs -= [ "//build/config/compiler:disallow_unstable_features" ]
}
+ # TODO(crbug.com/408221149): don't build this when `use_cpp_allocator_impls`
+ # is false.
static_library("allocator_impls") {
public_deps = []
if (rust_allocator_uses_partition_alloc) {
public_deps += [ "//base/allocator/partition_allocator:partition_alloc" ]
}
- sources = [
- "allocator_impls.cc",
- "allocator_impls.h",
- ]
-
- deps = [
- ":allocator_cpp_shared",
- ":buildflags",
-
- # TODO(crbug.com/408221149): remove the C++ -> Rust dependency for the
- # default allocator.
- "//build/rust/std",
- ]
-
- visibility = [ ":*" ]
- }
-
- source_set("allocator_shim_definitions") {
- sources = [ "allocator_shim_definitions.cc" ]
-
- deps = [ ":allocator_cpp_shared" ]
-
- visibility = [ ":*" ]
- }
-
- source_set("allocator_cpp_shared") {
sources = [
# `alias.*`, `compiler_specific.h`, and `immediate_crash.*` have been
# copied from `//base`.
# TODO(crbug.com/40279749): Avoid duplication / reuse code.
"alias.cc",
"alias.h",
+ "allocator_impls.cc",
+ "allocator_impls.h",
"compiler_specific.h",
"immediate_crash.h",
]
- visibility = [
- ":allocator_impls",
- ":allocator_shim_definitions",
- ]
+ deps = [ ":buildflags" ]
+
+ visibility = [ ":*" ]
}
}
diff --git a/build/rust/allocator/allocator_impls.cc b/build/rust/allocator/allocator_impls.cc
index 1fde98f23cd12..bf3c2a301adf5 100644
--- a/build/rust/allocator/allocator_impls.cc
+++ b/build/rust/allocator/allocator_impls.cc
@@ -101,16 +101,6 @@
#define USE_WIN_ALIGNED_MALLOC 0
#endif
-// The default allocator functions provided by the Rust standard library.
-extern "C" void* __rdl_alloc(size_t size, size_t align);
-extern "C" void __rdl_dealloc(void* p, size_t size, size_t align);
-extern "C" void* __rdl_realloc(void* p,
- size_t old_size,
- size_t align,
- size_t new_size);
-
-extern "C" void* __rdl_alloc_zeroed(size_t size, size_t align);
-
namespace rust_allocator_internal {
unsigned char* alloc(size_t size, size_t align) {
@@ -129,7 +119,8 @@ unsigned char* alloc(size_t size, size_t align) {
#elif USE_WIN_ALIGNED_MALLOC
return static_cast<unsigned char*>(_aligned_malloc(size, align));
#else
- return static_cast<unsigned char*>(__rdl_alloc(size, align));
+ // TODO(crbug.com/408221149): don't build this file in this case.
+ IMMEDIATE_CRASH();
#endif
}
@@ -143,7 +134,8 @@ void dealloc(unsigned char* p, size_t size, size_t align) {
#elif USE_WIN_ALIGNED_MALLOC
return _aligned_free(p);
#else
- __rdl_dealloc(p, size, align);
+ // TODO(crbug.com/408221149): don't build this file in this case.
+ IMMEDIATE_CRASH();
#endif
}
@@ -162,8 +154,8 @@ unsigned char* realloc(unsigned char* p,
#elif USE_WIN_ALIGNED_MALLOC
return static_cast<unsigned char*>(_aligned_realloc(p, new_size, align));
#else
- return static_cast<unsigned char*>(
- __rdl_realloc(p, old_size, align, new_size));
+ // TODO(crbug.com/408221149): don't build this file in this case.
+ IMMEDIATE_CRASH();
#endif
}
@@ -179,8 +171,14 @@ unsigned char* alloc_zeroed(size_t size, size_t align) {
}
return p;
#else
- return static_cast<unsigned char*>(__rdl_alloc_zeroed(size, align));
+ // TODO(crbug.com/408221149): don't build this file in this case.
+ IMMEDIATE_CRASH();
#endif
}
+void crash_immediately() {
+ NO_CODE_FOLDING();
+ IMMEDIATE_CRASH();
+}
+
} // namespace rust_allocator_internal
diff --git a/build/rust/allocator/allocator_impls.h b/build/rust/allocator/allocator_impls.h
index afb335412faf9..e90ab7cd422c1 100644
--- a/build/rust/allocator/allocator_impls.h
+++ b/build/rust/allocator/allocator_impls.h
@@ -20,6 +20,8 @@ unsigned char* realloc(unsigned char* p,
size_t new_size);
unsigned char* alloc_zeroed(size_t size, size_t align);
+void crash_immediately();
+
} // namespace rust_allocator_internal
#endif // BUILD_RUST_ALLOCATOR_ALLOCATOR_IMPLS_H_
diff --git a/build/rust/allocator/allocator_shim_definitions.cc b/build/rust/allocator/allocator_shim_definitions.cc
deleted file mode 100644
index a4d1bd77b7016..0000000000000
--- a/build/rust/allocator/allocator_shim_definitions.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2025 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <cstddef>
-
-#include "build/rust/allocator/alias.h"
-#include "build/rust/allocator/immediate_crash.h"
-
-extern "C" {
-
-// As part of rustc's contract for using `#[global_allocator]` without
-// rustc-generated shims we must define this symbol, since we are opting in to
-// unstable functionality. See https://github.com/rust-lang/rust/issues/123015
-//
-// Mark it weak since rustc will generate it when it drives linking.
-[[maybe_unused]]
-__attribute__((weak)) unsigned char __rust_no_alloc_shim_is_unstable;
-
-__attribute__((weak)) void __rust_alloc_error_handler(size_t size,
- size_t align) {
- NO_CODE_FOLDING();
- IMMEDIATE_CRASH();
-}
-
-__attribute__((
- weak)) extern const unsigned char __rust_alloc_error_handler_should_panic =
- 0;
-
-} // extern "C"
diff --git a/build/rust/allocator/lib.rs b/build/rust/allocator/lib.rs
index 7f4a0fc245694..b8b67d9c6c649 100644
--- a/build/rust/allocator/lib.rs
+++ b/build/rust/allocator/lib.rs
@@ -8,10 +8,20 @@
//! the allocator defined here. Currently this is a thin wrapper around
//! allocator_impls.cc's functions; see the documentation there.
+// Required to apply weak linkage to symbols.
+#![feature(linkage)]
+// Required to apply `#[rustc_std_internal_symbol]` to our alloc error handler
+// so the name is correctly mangled as rustc expects.
+#![cfg_attr(mangle_alloc_error_handler, allow(internal_features))]
+#![cfg_attr(mangle_alloc_error_handler, feature(rustc_attrs))]
+
+#[cfg(use_cpp_allocator_impls)]
use std::alloc::{GlobalAlloc, Layout};
+#[cfg(use_cpp_allocator_impls)]
struct Allocator;
+#[cfg(use_cpp_allocator_impls)]
unsafe impl GlobalAlloc for Allocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
unsafe { ffi::alloc(layout.size(), layout.align()) }
@@ -32,9 +42,36 @@ unsafe impl GlobalAlloc for Allocator {
}
}
+#[cfg(use_cpp_allocator_impls)]
#[global_allocator]
static GLOBAL: Allocator = Allocator;
+#[cfg(not(use_cpp_allocator_impls))]
+#[global_allocator]
+static GLOBAL: std::alloc::System = std::alloc::System;
+
+// As part of rustc's contract for using `#[global_allocator]` without
+// rustc-generated shims we must define this symbol, since we are opting in to
+// unstable functionality. See https://github.com/rust-lang/rust/issues/123015
+#[no_mangle]
+#[linkage = "weak"]
+static __rust_no_alloc_shim_is_unstable: u8 = 0;
+
+#[no_mangle]
+#[linkage = "weak"]
+static __rust_alloc_error_handler_should_panic: u8 = 0;
+
+// Mangle the symbol name as rustc expects.
+#[cfg_attr(mangle_alloc_error_handler, rustc_std_internal_symbol)]
+#[cfg_attr(not(mangle_alloc_error_handler), no_mangle)]
+#[linkage = "weak"]
+fn __rust_alloc_error_handler(_size: usize, _align: usize) {
+ unsafe { ffi::crash_immediately() }
+}
+
+// TODO(crbug.com/408221149): conditionally include the FFI glue based on
+// `use_cpp_allocator_impls`
+#[allow(dead_code)]
#[cxx::bridge(namespace = "rust_allocator_internal")]
mod ffi {
extern "C++" {
@@ -44,5 +81,6 @@ mod ffi {
unsafe fn dealloc(p: *mut u8, size: usize, align: usize);
unsafe fn realloc(p: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
unsafe fn alloc_zeroed(size: usize, align: usize) -> *mut u8;
+ unsafe fn crash_immediately();
}
}

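The cfg-based selection used in the patch above can likewise be shown standalone. This sketch uses assumed names (a use_counting_allocator cfg flag and a CountingAllocator wrapper in place of the C++-backed allocator): passing `--cfg use_counting_allocator` to rustc selects the wrapper, otherwise std::alloc::System is used, mirroring the `use_cpp_allocator_impls` rustflag set in BUILD.gn.

// Sketch of the cfg switch, with assumed names. Build with
// `rustc --cfg use_counting_allocator main.rs` to select the wrapper, or
// plain `rustc main.rs` to fall back to the system allocator.
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};

// Stand-in for Chromium's C++-backed allocator: wraps System and counts
// calls so the example needs no C++ at all.
#[allow(dead_code)]
struct CountingAllocator;

static ALLOCS: AtomicUsize = AtomicUsize::new(0);

unsafe impl GlobalAlloc for CountingAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        ALLOCS.fetch_add(1, Ordering::Relaxed);
        System.alloc(layout)
    }
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        System.dealloc(ptr, layout)
    }
}

// Exactly one of these two #[global_allocator] statics is compiled in.
#[cfg(use_counting_allocator)]
#[global_allocator]
static GLOBAL: CountingAllocator = CountingAllocator;

#[cfg(not(use_counting_allocator))]
#[global_allocator]
static GLOBAL: System = System;

fn main() {
    let v = vec![1u8, 2, 3];
    println!("allocations counted: {}, v = {:?}", ALLOCS.load(Ordering::Relaxed), v);
}
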
View file

@ -1,102 +0,0 @@
reduced -lnl
From e201e2d467b0daad6cdbbfcd5b0e34760e4099c1 Mon Sep 17 00:00:00 2001
From: Alan Zhao <ayzhao@google.com>
Date: Mon, 7 Apr 2025 18:15:01 -0700
Subject: [PATCH] Roll rust *only* f7b43542838f0a4a6cfdb17fbeadf45002042a77-1 :
3f690c2257b7080cd3a8cce64e082fc972148990-1
https://chromium.googlesource.com/external/github.com/rust-lang/rust/+log/f7b43542838f..3f690c2257b7
Ran: ./tools/clang/scripts/upload_revision.py 5b36835df010c5813808d34e45428c624fb52ff1
Additionally, add fixes to the rust allocator to address https://crbug.com/407024458.
Bug: 404285928,407024458
Disable-Rts: True
Cq-Include-Trybots: chromium/try:chromeos-amd64-generic-cfi-thin-lto-rel
Cq-Include-Trybots: chromium/try:dawn-win10-x86-deps-rel
Cq-Include-Trybots: chromium/try:linux-chromeos-dbg
Cq-Include-Trybots: chromium/try:linux_chromium_cfi_rel_ng
Cq-Include-Trybots: chromium/try:linux_chromium_chromeos_msan_rel_ng
Cq-Include-Trybots: chromium/try:linux_chromium_msan_rel_ng
Cq-Include-Trybots: chromium/try:mac11-arm64-rel,mac_chromium_asan_rel_ng
Cq-Include-Trybots: chromium/try:ios-catalyst,win-asan,android-official
Cq-Include-Trybots: chromium/try:fuchsia-arm64-cast-receiver-rel
Cq-Include-Trybots: chromium/try:mac-official,linux-official
Cq-Include-Trybots: chromium/try:win-official,win32-official
Cq-Include-Trybots: chromium/try:win-swangle-try-x86
Cq-Include-Trybots: chromium/try:android-cronet-riscv64-dbg
Cq-Include-Trybots: chromium/try:android-cronet-riscv64-rel
Cq-Include-Trybots: chrome/try:iphone-device
Cq-Include-Trybots: chrome/try:linux-chromeos-chrome
Cq-Include-Trybots: chrome/try:win-chrome,win64-chrome,linux-chrome,mac-chrome
Cq-Include-Trybots: chrome/try:linux-pgo,mac-pgo,win32-pgo,win64-pgo
Cq-Include-Trybots: luci.chromium.try:linux-cast-x64-rel
Cq-Include-Trybots: chromium/try:android-rust-arm32-rel
Cq-Include-Trybots: chromium/try:android-rust-arm64-dbg
Cq-Include-Trybots: chromium/try:android-rust-arm64-rel
Cq-Include-Trybots: chromium/try:linux-rust-x64-dbg
Cq-Include-Trybots: chromium/try:linux-rust-x64-rel
Cq-Include-Trybots: chromium/try:mac-rust-x64-dbg
Cq-Include-Trybots: chromium/try:win-rust-x64-dbg
Cq-Include-Trybots: chromium/try:win-rust-x64-rel
Change-Id: Iec99681a89deaf3f2c79c76f9c4d1c2b2b7d6fe1
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6439711
Reviewed-by: Collin Baker <collinbaker@chromium.org>
Commit-Queue: Alan Zhao <ayzhao@google.com>
Cr-Commit-Position: refs/heads/main@{#1443873}
---
build/rust/allocator/BUILD.gn | 6 +-
build/rust/allocator/lib.rs | 6 +-
build/rust/std/rules/BUILD.gn | 476 +++++++++++++++++-----------------
tools/rust/update_rust.py | 2 +-
4 files changed, 251 insertions(+), 239 deletions(-)
diff --git a/build/rust/allocator/BUILD.gn b/build/rust/allocator/BUILD.gn
index f09314afc8158..ca581630c76c9 100644
--- a/build/rust/allocator/BUILD.gn
+++ b/build/rust/allocator/BUILD.gn
@@ -32,6 +32,10 @@ if (toolchain_has_rust) {
sources = [ "lib.rs" ]
crate_root = "lib.rs"
cxx_bindings = [ "lib.rs" ]
+ rustflags = [
+ "--cfg",
+ "mangle_alloc_error_handler",
+ ]
deps = [ ":allocator_impls" ]
@@ -40,7 +44,7 @@ if (toolchain_has_rust) {
allow_unsafe = true
if (use_cpp_allocator_impls) {
- rustflags = [
+ rustflags += [
"--cfg",
"use_cpp_allocator_impls",
]
diff --git a/build/rust/allocator/lib.rs b/build/rust/allocator/lib.rs
index b8b67d9c6c649..4e2dad3d542a8 100644
--- a/build/rust/allocator/lib.rs
+++ b/build/rust/allocator/lib.rs
@@ -57,13 +57,17 @@ static GLOBAL: std::alloc::System = std::alloc::System;
#[linkage = "weak"]
static __rust_no_alloc_shim_is_unstable: u8 = 0;
-#[no_mangle]
+// Mangle the symbol name as rustc expects.
+#[cfg_attr(mangle_alloc_error_handler, rustc_std_internal_symbol)]
+#[cfg_attr(not(mangle_alloc_error_handler), no_mangle)]
+#[allow(non_upper_case_globals)]
#[linkage = "weak"]
static __rust_alloc_error_handler_should_panic: u8 = 0;
// Mangle the symbol name as rustc expects.
#[cfg_attr(mangle_alloc_error_handler, rustc_std_internal_symbol)]
#[cfg_attr(not(mangle_alloc_error_handler), no_mangle)]
+#[allow(non_upper_case_globals)]
#[linkage = "weak"]
fn __rust_alloc_error_handler(_size: usize, _align: usize) {
unsafe { ffi::crash_immediately() }

View file

@ -1,44 +0,0 @@
From 4a0377f0b847af505915b0e0a6c4178d4e7c3244 Mon Sep 17 00:00:00 2001
From: Matt Jolly <kangie@gentoo.org>
Date: Mon, 14 Apr 2025 20:16:46 -0700
Subject: [PATCH] Drop `remap_alloc` dep
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
commit e3a1797dbab3eaa1c808d53215b32c8759d27ac7 dropped the source set
that this refers to, in favour of a more modern, crate-based solution.
This seems to have been overlooked, possibly as it only appears to
be called if using the unbundle toolchain.
Bug: 408221149
Signed-off-by: Matt Jolly <kangie@gentoo.org>
Change-Id: I1703d8e1e456161aa2b736169eec407235847099
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6456604
Reviewed-by: Andrew Grieve <agrieve@chromium.org>
Reviewed-by: Lei Zhang <thestig@chromium.org>
Commit-Queue: Łukasz Anforowicz <lukasza@chromium.org>
Reviewed-by: Łukasz Anforowicz <lukasza@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1446912}
---
build/rust/std/BUILD.gn | 6 ------
1 file changed, 6 deletions(-)
diff --git a/build/rust/std/BUILD.gn b/build/rust/std/BUILD.gn
index 25db126076b2f..bb2c9884520b3 100644
--- a/build/rust/std/BUILD.gn
+++ b/build/rust/std/BUILD.gn
@@ -355,12 +355,6 @@ if (toolchain_has_rust) {
":stdlib_public_dependent_libs",
]
deps = [ ":prebuilt_rustc_copy_to_sysroot" ]
-
- # The host builds tools toolchain supports Rust only and does not use
- # the allocator remapping to point it to PartitionAlloc.
- if (!toolchain_for_rust_host_build_tools) {
- deps += [ ":remap_alloc" ]
- }
}
}
}

View file

@ -1,354 +0,0 @@
From e65cb388e5da56d1236607e0db9cadf89e50eded Mon Sep 17 00:00:00 2001
From: Lukasz Anforowicz <lukasza@chromium.org>
Date: Tue, 15 Apr 2025 11:10:19 -0700
Subject: [PATCH] [rust] Clean up `//build/rust/allocator` after a Rust
toolchain roll.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This CL makes minor tweaks and changes under `//build/rust/allocator`:
* Thanks to the Rust toolchain roll, we no longer need to keep two
implementations, picking between them using the
`mangle_alloc_error_handler` configuration knob.
* The `#[cfg(use_cpp_allocator_impls)]` vs
`#[cfg(not(use_cpp_allocator_impls))]` choices have been deduplicated
by putting the related/conditional stuff under `mod cpp_allocator` and
`rust_allocator`.
* Closes a minor gap missed in https://crrev.com/c/6432410:
- Moving `DEPS` file to the new source location
Bug: 408221149
Change-Id: Id541797e03da113a5271b02a5f60eb2be08254a9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6454872
Reviewed-by: Alan Zhao <ayzhao@google.com>
Commit-Queue: Łukasz Anforowicz <lukasza@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1447241}
---
build/rust/allocator/BUILD.gn | 11 +-
build/rust/{std => allocator}/DEPS | 2 +-
build/rust/allocator/allocator_impls.cc | 65 ++----------
build/rust/allocator/allocator_impls.h | 2 +
build/rust/allocator/lib.rs | 132 +++++++++++++++---------
5 files changed, 97 insertions(+), 115 deletions(-)
rename build/rust/{std => allocator}/DEPS (76%)
diff --git a/build/rust/allocator/BUILD.gn b/build/rust/allocator/BUILD.gn
index ca581630c76c9..434a61e11bdbb 100644
--- a/build/rust/allocator/BUILD.gn
+++ b/build/rust/allocator/BUILD.gn
@@ -32,10 +32,6 @@ if (toolchain_has_rust) {
sources = [ "lib.rs" ]
crate_root = "lib.rs"
cxx_bindings = [ "lib.rs" ]
- rustflags = [
- "--cfg",
- "mangle_alloc_error_handler",
- ]
deps = [ ":allocator_impls" ]
@@ -43,13 +39,12 @@ if (toolchain_has_rust) {
no_allocator_crate = true
allow_unsafe = true
+ rustflags = []
if (use_cpp_allocator_impls) {
- rustflags += [
- "--cfg",
- "use_cpp_allocator_impls",
- ]
+ rustflags += [ "--cfg=use_cpp_allocator_impls" ]
}
+ # TODO(https://crbug.com/410596442): Stop using unstable features here.
configs -= [ "//build/config/compiler:disallow_unstable_features" ]
}
diff --git a/build/rust/std/DEPS b/build/rust/allocator/DEPS
similarity index 76%
rename from build/rust/std/DEPS
rename to build/rust/allocator/DEPS
index eb524c0a06acd..923a2e07c80f4 100644
--- a/build/rust/std/DEPS
+++ b/build/rust/allocator/DEPS
@@ -3,7 +3,7 @@ include_rules = [
]
specific_include_rules = {
- "remap_alloc.cc" : [
+ "allocator_impls.cc" : [
"+partition_alloc"
]
}
diff --git a/build/rust/allocator/allocator_impls.cc b/build/rust/allocator/allocator_impls.cc
index bf3c2a301adf5..8887752f3dfad 100644
--- a/build/rust/allocator/allocator_impls.cc
+++ b/build/rust/allocator/allocator_impls.cc
@@ -24,62 +24,6 @@
#include <cstdlib>
#endif
-// NOTE: this documentation is outdated.
-//
-// TODO(crbug.com/408221149): update this documentation, or replace it with docs
-// in the Rust allocator implementation.
-//
-// When linking a final binary, rustc has to pick between either:
-// * The default Rust allocator
-// * Any #[global_allocator] defined in *any rlib in its dependency tree*
-// (https://doc.rust-lang.org/edition-guide/rust-2018/platform-and-target-support/global-allocators.html)
-//
-// In this latter case, this fact will be recorded in some of the metadata
-// within the .rlib file. (An .rlib file is just a .a file, but does have
-// additional metadata for use by rustc. This is, as far as I know, the only
-// such metadata we would ideally care about.)
-//
-// In all the linked rlibs,
-// * If 0 crates define a #[global_allocator], rustc uses its default allocator
-// * If 1 crate defines a #[global_allocator], rustc uses that
-// * If >1 crates define a #[global_allocator], rustc bombs out.
-//
-// Because rustc does these checks, it doesn't just have the __rust_alloc
-// symbols defined anywhere (neither in the stdlib nor in any of these
-// crates which have a #[global_allocator] defined.)
-//
-// Instead:
-// Rust's final linking stage invokes dynamic LLVM codegen to create symbols
-// for the basic heap allocation operations. It literally creates a
-// __rust_alloc symbol at link time. Unless any crate has specified a
-// #[global_allocator], it simply calls from __rust_alloc into
-// __rdl_alloc, which is the default Rust allocator. The same applies to a
-// few other symbols.
-//
-// We're not (always) using rustc for final linking. For cases where we're not
-// Rustc as the final linker, we'll define those symbols here instead. This
-// allows us to redirect allocation to PartitionAlloc if clang is doing the
-// link.
-//
-// We use unchecked allocation paths in PartitionAlloc rather than going through
-// its shims in `malloc()` etc so that we can support fallible allocation paths
-// such as Vec::try_reserve without crashing on allocation failure.
-//
-// In future, we should build a crate with a #[global_allocator] and
-// redirect these symbols back to Rust in order to use to that crate instead.
-// This would allow Rust-linked executables to:
-// 1. Use PartitionAlloc on Windows. The stdlib uses Windows heap functions
-// directly that PartitionAlloc can not intercept.
-// 2. Have `Vec::try_reserve` to fail at runtime on Linux instead of crashing in
-// malloc() where PartitionAlloc replaces that function.
-//
-// They're weak symbols, because this file will sometimes end up in targets
-// which are linked by rustc, and thus we would otherwise get duplicate
-// definitions. The following definitions will therefore only end up being
-// used in targets which are linked by our C++ toolchain.
-//
-// # On Windows ASAN
-//
// In ASAN builds, PartitionAlloc-Everywhere is disabled, meaning malloc() and
// friends in C++ do not go to PartitionAlloc. So we also don't point the Rust
// allocation functions at PartitionAlloc. Generally, this means we just direct
@@ -93,7 +37,6 @@
// Note that there is a runtime option to make ASAN hook HeapAlloc() but
// enabling it breaks Win32 APIs like CreateProcess:
// https://issues.chromium.org/u/1/issues/368070343#comment29
-
#if !BUILDFLAG(RUST_ALLOCATOR_USES_PARTITION_ALLOC) && BUILDFLAG(IS_WIN) && \
defined(ADDRESS_SANITIZER)
#define USE_WIN_ALIGNED_MALLOC 1
@@ -110,6 +53,10 @@ unsigned char* alloc(size_t size, size_t align) {
return nullptr;
}
+ // We use unchecked allocation paths in PartitionAlloc rather than going
+ // through its shims in `malloc()` etc so that we can support fallible
+ // allocation paths such as Vec::try_reserve without crashing on allocation
+ // failure.
if (align <= alignof(std::max_align_t)) {
return static_cast<unsigned char*>(allocator_shim::UncheckedAlloc(size));
} else {
@@ -144,6 +91,10 @@ unsigned char* realloc(unsigned char* p,
size_t align,
size_t new_size) {
#if BUILDFLAG(RUST_ALLOCATOR_USES_PARTITION_ALLOC)
+ // We use unchecked allocation paths in PartitionAlloc rather than going
+ // through its shims in `malloc()` etc so that we can support fallible
+ // allocation paths such as Vec::try_reserve without crashing on allocation
+ // failure.
if (align <= alignof(std::max_align_t)) {
return static_cast<unsigned char*>(
allocator_shim::UncheckedRealloc(p, new_size));
diff --git a/build/rust/allocator/allocator_impls.h b/build/rust/allocator/allocator_impls.h
index e90ab7cd422c1..e562a877d886e 100644
--- a/build/rust/allocator/allocator_impls.h
+++ b/build/rust/allocator/allocator_impls.h
@@ -10,6 +10,8 @@
#include "build/build_config.h"
#include "build/rust/allocator/buildflags.h"
+// This header exposes PartitionAlloc to Rust
+// (most APIs below are called from `impl GlobalAlloc` in `lib.rs`).
namespace rust_allocator_internal {
unsigned char* alloc(size_t size, size_t align);
diff --git a/build/rust/allocator/lib.rs b/build/rust/allocator/lib.rs
index 4e2dad3d542a8..a4f898f9b107f 100644
--- a/build/rust/allocator/lib.rs
+++ b/build/rust/allocator/lib.rs
@@ -5,72 +5,106 @@
//! Define the allocator that Rust code in Chrome should use.
//!
//! Any final artifact that depends on this crate, even transitively, will use
-//! the allocator defined here. Currently this is a thin wrapper around
-//! allocator_impls.cc's functions; see the documentation there.
+//! the allocator defined here.
+//!
+//! List of known issues:
+//!
+//! 1. We'd like to use PartitionAlloc on Windows, but the stdlib uses Windows
+//! heap functions directly that PartitionAlloc can not intercept.
+//! 2. We'd like `Vec::try_reserve` to fail at runtime on Linux instead of
+//! crashing in malloc() where PartitionAlloc replaces that function.
// Required to apply weak linkage to symbols.
+//
+// TODO(https://crbug.com/410596442): Stop using unstable features here.
+// https://github.com/rust-lang/rust/issues/29603 tracks stabilization of the `linkage` feature.
#![feature(linkage)]
// Required to apply `#[rustc_std_internal_symbol]` to our alloc error handler
// so the name is correctly mangled as rustc expects.
-#![cfg_attr(mangle_alloc_error_handler, allow(internal_features))]
-#![cfg_attr(mangle_alloc_error_handler, feature(rustc_attrs))]
+//
+// TODO(https://crbug.com/410596442): Stop using internal features here.
+#![allow(internal_features)]
+#![feature(rustc_attrs)]
+/// Module that provides `#[global_allocator]` / `GlobalAlloc` interface for
+/// using an allocator from C++.
#[cfg(use_cpp_allocator_impls)]
-use std::alloc::{GlobalAlloc, Layout};
+mod cpp_allocator {
+ use super::ffi;
+ use std::alloc::{GlobalAlloc, Layout};
-#[cfg(use_cpp_allocator_impls)]
-struct Allocator;
+ struct Allocator;
-#[cfg(use_cpp_allocator_impls)]
-unsafe impl GlobalAlloc for Allocator {
- unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- unsafe { ffi::alloc(layout.size(), layout.align()) }
- }
+ unsafe impl GlobalAlloc for Allocator {
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ unsafe { ffi::alloc(layout.size(), layout.align()) }
+ }
- unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
- unsafe {
- ffi::dealloc(ptr, layout.size(), layout.align());
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ unsafe {
+ ffi::dealloc(ptr, layout.size(), layout.align());
+ }
}
- }
- unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
- unsafe { ffi::alloc_zeroed(layout.size(), layout.align()) }
- }
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ unsafe { ffi::alloc_zeroed(layout.size(), layout.align()) }
+ }
- unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
- unsafe { ffi::realloc(ptr, layout.size(), layout.align(), new_size) }
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ unsafe { ffi::realloc(ptr, layout.size(), layout.align(), new_size) }
+ }
}
-}
-#[cfg(use_cpp_allocator_impls)]
-#[global_allocator]
-static GLOBAL: Allocator = Allocator;
+ #[global_allocator]
+ static GLOBAL: Allocator = Allocator;
+}
+/// Module that provides `#[global_allocator]` / `GlobalAlloc` interface for
+/// using the default Rust allocator.
#[cfg(not(use_cpp_allocator_impls))]
-#[global_allocator]
-static GLOBAL: std::alloc::System = std::alloc::System;
-
-// As part of rustc's contract for using `#[global_allocator]` without
-// rustc-generated shims we must define this symbol, since we are opting in to
-// unstable functionality. See https://github.com/rust-lang/rust/issues/123015
-#[no_mangle]
-#[linkage = "weak"]
-static __rust_no_alloc_shim_is_unstable: u8 = 0;
-
-// Mangle the symbol name as rustc expects.
-#[cfg_attr(mangle_alloc_error_handler, rustc_std_internal_symbol)]
-#[cfg_attr(not(mangle_alloc_error_handler), no_mangle)]
-#[allow(non_upper_case_globals)]
-#[linkage = "weak"]
-static __rust_alloc_error_handler_should_panic: u8 = 0;
-
-// Mangle the symbol name as rustc expects.
-#[cfg_attr(mangle_alloc_error_handler, rustc_std_internal_symbol)]
-#[cfg_attr(not(mangle_alloc_error_handler), no_mangle)]
-#[allow(non_upper_case_globals)]
-#[linkage = "weak"]
-fn __rust_alloc_error_handler(_size: usize, _align: usize) {
- unsafe { ffi::crash_immediately() }
+mod rust_allocator {
+ #[global_allocator]
+ static GLOBAL: std::alloc::System = std::alloc::System;
+}
+
+/// Module that provides global symbols that are needed both by `cpp_allocator`
+/// and `rust_allocator`.
+///
+/// When `rustc` drives linking, then it will define the symbols below. But
+/// Chromium only uses `rustc` to link Rust-only executables (e.g. `build.rs`
+/// scripts) and otherwise uses a non-Rust linker. This is why we have to
+/// manually define a few symbols below. We define those symbols
+/// as "weak" symbols, so that Rust-provided symbols "win" in case where Rust
+/// actually does drive the linking. This hack works (not only for Chromium,
+/// but also for google3 and other projects), but isn't officially supported by
+/// `rustc`.
+///
+/// TODO(https://crbug.com/410596442): Stop using internal features here.
+mod both_allocators {
+ use super::ffi;
+
+ /// As part of rustc's contract for using `#[global_allocator]` without
+ /// rustc-generated shims we must define this symbol, since we are opting in
+ /// to unstable functionality. See https://github.com/rust-lang/rust/issues/123015
+ #[no_mangle]
+ #[linkage = "weak"]
+ static __rust_no_alloc_shim_is_unstable: u8 = 0;
+
+ // Mangle the symbol name as rustc expects.
+ #[rustc_std_internal_symbol]
+ #[allow(non_upper_case_globals)]
+ #[linkage = "weak"]
+ static __rust_alloc_error_handler_should_panic: u8 = 0;
+
+ // Mangle the symbol name as rustc expects.
+ #[rustc_std_internal_symbol]
+ #[allow(non_upper_case_globals)]
+ #[linkage = "weak"]
+ fn __rust_alloc_error_handler(_size: usize, _align: usize) {
+ // TODO(lukasza): Investigate if we can just call `std::process::abort()` here.
+ // (Not really _needed_, but it could simplify code a little bit.)
+ unsafe { ffi::crash_immediately() }
+ }
}
// TODO(crbug.com/408221149): conditionally include the FFI glue based on

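The weak-symbol arrangement described in the both_allocators module comment above boils down to very little code. A nightly-only sketch with a hypothetical symbol name (SHARED_FLAG): a `#[linkage = "weak"]` definition yields to any strong definition of the same symbol elsewhere in the final link, which is how these fallback definitions coexist with the ones rustc emits when it drives linking itself.

// Nightly-only sketch: the `linkage` feature is unstable, as the patch above
// notes. SHARED_FLAG is a made-up name for illustration; a strong definition
// of the same symbol anywhere else in the link would override this weak one.
#![feature(linkage)]

#[no_mangle]
#[linkage = "weak"]
static SHARED_FLAG: u8 = 0;

fn main() {
    // If no strong SHARED_FLAG exists in the link, this weak one is used.
    println!("SHARED_FLAG is at {:p}", &SHARED_FLAG);
}
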
View file

@ -1,20 +1,15 @@
# Contributor: lauren n. liberda <lauren@selfisekai.rocks>
# Maintainer: Antoine Martin (ayakael) <dev@ayakael.net>
# Maintainer: psykose <alice@ayaya.dev>
pkgname=electron
pkgver=35.5.0
_gittag=v"${pkgver/_beta/-beta.}"
pkgrel=0
_chromium=134.0.6998.205
_copium_tag=134.0
_depot_tools=495b23b39aaba2ca3b55dd27cadc523f1cb17ee6
pkgver=21.4.2
pkgrel=1
_chromium=106.0.5249.199
_depot_tools=d85c5bc6ff0dee8171007d947aabc3dfa6dd433b
pkgdesc="Electron cross-platform desktop toolkit"
url="https://github.com/electron/electron"
arch="aarch64 x86_64" # same as chromium
license="MIT"
depends="gtk+3.0 so:libudev.so.1 xdg-utils"
_llvmver=20
makedepends="
ada-dev
alsa-lib-dev
aom-dev
bash
@ -23,20 +18,17 @@ makedepends="
bzip2-dev
c-ares-dev
cairo-dev
clang$_llvmver-dev
clang-dev
clang-extra-tools
compiler-rt
crc32c-dev
cups-dev
curl-dev
dav1d-dev
dbus-glib-dev
double-conversion-dev
eudev-dev
ffmpeg-dev
findutils
flac-dev
flatbuffers-dev
flex
freetype-dev
gperf
@ -44,10 +36,9 @@ makedepends="
gn
gzip
harfbuzz-dev
hdrhistogram-c-dev
hunspell-dev
http-parser-dev
hwdata-dev
hwids-usb
java-jdk
jpeg-dev
jsoncpp-dev
@ -57,12 +48,11 @@ makedepends="
libavif-dev
libbsd-dev
libcap-dev
libdrm-dev
libevent-dev
libexif-dev
libgcrypt-dev
libjpeg-turbo-dev
libnotify-dev
libsecret-dev
libusb-dev
libva-dev
libwebp-dev
@ -75,220 +65,183 @@ makedepends="
libxslt-dev
linux-headers
lld
llhttp-dev
llvm$_llvmver
llvm
mesa-dev
minizip-dev
nghttp2-dev
nodejs
npm
nss-dev
openh264-dev
opus-dev
pciutils-dev
perl
pipewire-dev
pulseaudio-dev
py3-httplib2
py3-jinja2
py3-parsing
py3-setuptools
py3-six
python3
re2-dev
rsync
rust
rust-bindgen
samurai
simdutf-dev
snappy-dev
speex-dev
spirv-tools-dev
sqlite-dev
woff2-dev
xcb-proto
yarn
zlib-dev
zstd-dev
"
subpackages="$pkgname-lang $pkgname-dev"
# the lower patches are specific to electron, the top ones are from the equivalent chromium version
source="
https://ayakael.net/api/packages/mirrors/generic/electron/$_gittag/electron-$_gittag-$_chromium.tar.zst
copium-$_copium_tag.tar.gz::https://codeberg.org/selfisekai/copium/archive/$_copium_tag.tar.gz
source="https://dev.alpinelinux.org/archive/electron/electron-$pkgver.tar.xz
angle-wayland-include.patch
canonicalize-file-name.patch
chromium-VirtualCursor-standard-layout.patch
chromium-revert-drop-of-system-java.patch
compiler.patch
disable-dns_config_service.patch
disable-failing-tests.patch
fc-cache-version.patch
fix-opus.patch
fstatat-32bit.patch
chromium-use-alpine-target.patch
credentials-sys-types-header.patch
default-pthread-stacksize.patch
dns-resolver.patch
fix-crashpad.patch
fix-missing-cstdint-include-musl.patch
fix-narrowing-cast.patch
gdbinit.patch
generic-sensor-include.patch
musl-auxv.patch
jsoncpp.patch
memory-tagging-arm64.patch
musl-sandbox.patch
musl-tid-caching.patch
musl-v8-monotonic-pthread-cont_timedwait.patch
no-execinfo.patch
no-glibc-version.patch
no-mallinfo.patch
no-res-ninit-nclose.patch
no-sandbox-settls.patch
partalloc-no-tagging-arm64.patch
pvalloc.patch
no-stat-redefine.patch
nullptr-t.patch
partition-atfork.patch
py3.11.patch
quiche-arena-size.patch
scoped-file-no-close.patch
temp-failure-retry.patch
pipewire-1.4.patch
gperf-3.2-fix.patch
0001-Reland-Use-global_allocator-to-provide-Rust-allocato.patch
0002-Call-Rust-default-allocator-directly-from-Rust.patch
0003-Roll-rust-only-f7b43542838f0a4a6cfdb17fbeadf45002042.patch
0004-Drop-remap_alloc-dep.patch
0005-rust-Clean-up-build-rust-allocator-after-a-Rust-tool.patch
wtf-stacksize.patch
electron_icon.patch
electron_python-jinja-3.10.patch
electron_webpack-hash.patch
electron_unbundle-node.patch
electron_system-zlib-headers.patch
icon.patch
python-jinja-3.10.patch
system-node.patch
vector-const.patch
webpack-hash.patch
default.conf
electron.desktop
electron-launcher.sh
"
_copium_patches="
cr131-v8-non4k-pages.patch
cr133-ffmpeg-no-noh264parse.patch
cr133-is-musl-libcxx.patch
cr133-mv2-still-not-dead.patch
cr134-unbundle-simdutf.patch
"
# Avoid conflicting providers
sonameprefix="$pkgname:"
# tests are todo for some base checks
options="!check net suid"
builddir="$srcdir/electron-$_gittag-$_chromium"
options="!check suid"
export CC=clang-$_llvmver
export CXX=clang++-$_llvmver
# clang uses much less memory (and this doesn't support gcc)
export CC=clang
export CXX=clang++
# required to find the tools
export AR=llvm-ar
export NM=llvm-nm
export LD=clang++-$_llvmver
export LD=clang++
# less log spam, reproducible
export CFLAGS="${CFLAGS/-g/} -O2 -Wno-builtin-macro-redefined -Wno-deprecated-declarations -Wno-shift-count-overflow -Wno-ignored-attributes"
export CXXFLAGS="${CXXFLAGS/-g/} -O2 -Wno-builtin-macro-redefined -Wno-deprecated-declarations -Wno-invalid-constexpr"
export CPPFLAGS="${CPPFLAGS/-g/} -D__DATE__= -D__TIME__= -D__TIMESTAMP__="
case "$CARCH" in
aarch64|arm*|riscv64)
# not supported by clang here
export CFLAGS="${CFLAGS/-fstack-clash-protection}"
export CXXFLAGS="${CXXFLAGS/-fstack-clash-protection}"
;;
esac
export CFLAGS="${CFLAGS/-g/} -O2 -Wno-unknown-warning-option -Wno-builtin-macro-redefined"
export CXXFLAGS="${CXXFLAGS/-g/} -O2 -Wno-unknown-warning-option -Wno-builtin-macro-redefined"
export CPPFLAGS="$CPPFLAGS -D__DATE__= -D__TIME__= -D__TIMESTAMP__="
# breaks chromium-based stuff
export CXXFLAGS="${CXXFLAGS/-D_GLIBCXX_ASSERTIONS=1}"
# workaround to error: undefined symbol: __rustc::__rust_dealloc
# with 000*.patch patches
export RUSTC_BOOTSTRAP=1
_gn_flags() {
echo "$@"
}
# creates a dist tarball that does not need to git clone everything at build time.
_distbucket="sakamoto/lnl-aports-snapshots/"
snapshot() {
deps
# vpython3 execs system python3 with this set
export VPYTHON_BYPASS="manually managed python not supported by chrome operations"
export CHROMIUM_BUILDTOOLS_PATH="$srcdir/src/buildtools"
export DEPOT_TOOLS_UPDATE=0
mkdir -p "$srcdir"
cd "$srcdir"
git clone --branch=$_chromium --depth=1 \
https://chromium.googlesource.com/chromium/src.git
if ! [ -d depot_tools ]; then
(
git clone --depth 1 -b main https://chromium.googlesource.com/chromium/tools/depot_tools.git
cd depot_tools
git fetch --depth 1 origin $_depot_tools
git checkout $_depot_tools
)
fi
git clone https://github.com/electron/electron.git
(
git clone --depth 1 -b main https://chromium.googlesource.com/chromium/tools/depot_tools.git
cd depot_tools
git fetch --depth 1 origin $_depot_tools
git checkout $_depot_tools
)
export PATH="$PATH:$srcdir/depot_tools"
echo "solutions = [
{
\"name\": \"src/electron\",
\"url\": \"https://github.com/electron/electron.git@$_gittag\",
\"url\": \"file://$srcdir/electron@v$pkgver\",
\"deps_file\": \"DEPS\",
\"managed\": False,
\"custom_deps\": {
\"src\": \"https://chromium.googlesource.com/chromium/src.git@$_chromium\",
\"src\": None,
},
\"custom_vars\": {},
},
]" > .gclient
python3 depot_tools/gclient.py sync \
--no-history \
--with_branch_heads \
--with_tags \
--nohooks
python3 src/build/landmines.py
python3 src/build/util/lastchange.py -o src/build/util/LASTCHANGE \
python3 src/build/util/lastchange.py -o src/build/util/LASTCHANGE
python3 src/build/util/lastchange.py -s src/third_party/dawn \
--revision src/gpu/webgpu/DAWN_VERSION
python3 src/build/util/lastchange.py -m GPU_LISTS_VERSION \
--revision-id-only --header src/gpu/config/gpu_lists_version.h
python3 src/build/util/lastchange.py -m SKIA_COMMIT_HASH \
-s src/third_party/skia --header src/skia/ext/skia_commit_hash.h
# rolled newer chromium with it included
sed -i '/reland_mojom_ts_generator_handle_empty_module_path_identically_to.patch/d' src/electron/patches/chromium/.patches
# why?
cp -r electron/patches/ffmpeg src/electron/patches/
python3 src/electron/script/apply_all_patches.py \
src/electron/patches/config.json
python3 electron/script/apply_all_patches.py \
electron/patches/config.json
mv src $pkgname-$_gittag-$_chromium
python3 src/tools/update_pgo_profiles.py \
--target=linux \
update \
--gs-url-base=chromium-optimization-profiles/pgo_profiles
python3 src/tools/download_optimization_profile.py \
--newest_state=src/chrome/android/profiles/newest.txt \
--local_state=src/chrome/android/profiles/local.txt \
--output_name=src/chrome/android/profiles/afdo.prof \
--gs_url_base=chromeos-prebuilt/afdo-job/llvm
mv src $pkgname-$pkgver
# extra binaries are most likely things we don't want, so nuke them all
for elf in $(scanelf -RA -F "%F" $pkgname-$_gittag-$_chromium); do
for elf in $(scanelf -RA -F "%F" $pkgname-$pkgver); do
rm -f "$elf"
done
msg "generating tarball.. (this takes a while)"
tar -cf $pkgname-$_gittag-$_chromium.tar \
tar -cf $pkgname-$pkgver.tar \
--exclude="ChangeLog*" \
--exclude="testdata/*" \
--exclude="test_data/*" \
--exclude="android_rust_toolchain/*" \
--exclude="third_party/instrumented_libs/binaries" \
--exclude="testdata/" \
--exclude="test_data/" \
--exclude="android_rust_toolchain/toolchain/" \
--exclude-backups \
--exclude-caches-all \
--exclude-vcs \
$pkgname-$_gittag-$_chromium
$pkgname-$pkgver
zstd --auto-threads=logical --ultra --long -22 -T"${ZSTD_LIMIT:-0}" -vv $pkgname-$_gittag-$_chromium.tar -o "$SRCDEST"/$pkgname-$_gittag-$_chromium.tar.zst
mcli cp "$SRCDEST"/$pkgname-$_gittag-$_chromium.tar.zst "$_distbucket"
xz --memlimit=60GB -T0 -e -9 -vv -k $pkgname-$pkgver.tar
}
prepare() {
default_prepare
for i in $_copium_patches; do
case "$i" in
*.patch)
msg "${i%::*}"
patch -p1 -i "$srcdir/copium/$i" || failed="$failed $i"
;;
esac
done
if [ -n "$failed" ]; then
error "The following patches failed to apply:"
for i in $failed; do
printf " %s\n" "$i" >&2
done
exit 1
fi
git init -q .
git init .
# link to system tools
ln -sfv /usr/bin/clang-format buildtools/linux64/clang-format
@ -298,57 +251,49 @@ prepare() {
(
cd electron
git init -q .
git init .
git config user.email "example@example.com"
git config user.name "example"
git config commit.gpgsign false
git add LICENSE
git commit -m "init"
git tag "$_gittag"
git pack-refs
yarn install --frozen-lockfile --ignore-scripts
git tag "v$pkgver"
# jesus christ what the fuck is wrong with you?
touch .git/packed-refs
yarn install --frozen-lockfile
)
(
cd third_party/node
./update_npm_deps
npm ci
)
# reusable system library settings
# flatbuffers - tensorflow has a few static_asserts for a specific patch version
# highway - requires highway>=1.1.0 (arm failures)
# libavif - https://github.com/AOMediaCodec/libavif/commit/50a541469c98009016af8dcc9f83a1be79f3a7d9
# libaom - https://aomedia.googlesource.com/aom/+/706ee36dcc82%5E%21/
# but watch this space: https://aomedia-review.googlesource.com/c/aom/+/188606
# jsoncpp, re2, snappy, swiftshader-*, woff2 - requires use_custom_libcxx=false
# icu 76 does not build - https://bugs.gentoo.org/943216
local chromium_use_system="
local use_system="
brotli
crc32c
dav1d
double-conversion
ffmpeg
flac
fontconfig
freetype
harfbuzz-ng
icu
jsoncpp
libaom
libavif
libdrm
libevent
libjpeg
libsecret
libusb
libwebp
libxml
libxslt
openh264
opus
simdutf
re2
snappy
woff2
zlib
zstd
"
for _lib in $chromium_use_system jinja2 libjpeg_turbo unrar; do
for _lib in $use_system libjpeg_turbo; do
msg "Removing buildscripts for system provided $_lib"
_lib="${_lib/swiftshader-/swiftshader/third_party/}"
find . -type f -path "*third_party/$_lib/*" \
\! -path "*third_party/$_lib/chromium/*" \
\! -path "*third_party/$_lib/google/*" \
@ -357,55 +302,20 @@ prepare() {
\! -path './third_party/pdfium/third_party/freetype/include/pstables.h' \
\! -path './third_party/harfbuzz-ng/utils/hb_scoped.h' \
\! -path './third_party/crashpad/crashpad/third_party/zlib/zlib_crashpad.h' \
\! -regex '.*\.\(gn\|gni\|gyp\|gypi\|isolate\|py\)' \
\! -regex '.*\.\(gn\|gni\|isolate\|py\)' \
-delete
done
# ada - needs use_custom_libcxx=false
local node_use_system="
llhttp
brotli
cares
corepack
histogram
nghttp2
nghttp3
ngtcp2
zlib
"
# some of these are provided by system, e.g. brotli. some are from chromium,
# e.g. boringssl (as openssl). some are not in use at all (corepack)
for _lib in $node_use_system openssl; do
msg "Removing buildscripts for $_lib"
find . -type f -path "*third_party/electron_node/deps/$_lib/*" \
\! -path "*third_party/electron_node/deps/$_lib/chromium/*" \
\! -path "*third_party/electron_node/deps/$_lib/google/*" \
\! -regex '.*\.\(gn\|gni\|gyp\|gypi\|isolate\|py\)' \
-delete
done
# XXX: hack. unbundle-node.patch uses this list to switch things
# in config.gypi. https://github.com/electron/electron/issues/40836
echo $node_use_system > third_party/electron_node/use_system.txt
rm -rf third_party/electron_node/tools/inspector_protocol/jinja2
# https://groups.google.com/a/chromium.org/d/topic/chromium-packagers/9JX1N2nf4PU/discussion
touch chrome/test/data/webui/i18n_process_css_test.html
# Use the file at run time instead of effectively compiling it in
sed 's|//third_party/usb_ids/usb.ids|/usr/share/hwdata/usb.ids|g' \
-i services/device/public/cpp/usb/BUILD.gn
msg "Running debundle script"
python3 build/linux/unbundle/replace_gn_files.py --system-libraries \
$chromium_use_system
$use_system
python3 third_party/libaddressinput/chromium/tools/update-strings.py
# flatc is used in build workflows since https://crrev.com/c/5595037,
# but the pre-generated files are still checked-in. remove to make sure
# they're not used. (if used, they will break builds on version mismatch.)
# https://github.com/tensorflow/tensorflow/issues/62298
# find third_party/tflite/ -name '*_generated.h' -delete
# prevent annoying errors when regenerating gni
sed -i 's,^update_readme$,#update_readme,' \
third_party/libvpx/generate_gni.sh
@ -418,100 +328,79 @@ prepare() {
sed -i -e 's/\<xmlMalloc\>/malloc/' -e 's/\<xmlFree\>/free/' \
third_party/blink/renderer/core/xml/*.cc \
third_party/blink/renderer/core/xml/parser/xml_document_parser.cc \
third_party/libxml/chromium/*.cc
third_party/libxml/chromium/*.cc \
third_party/maldoca/src/maldoca/ole/oss_utils.h
_configure
}
_configure() {
cd "$builddir"
msg "Configuring build"
case "$USE_CCACHE" in
1)
local cc_wrapper="ccache"
;;
*)
local cc_wrapper=""
;;
esac
local maglev=true
local symbol_level=0
local vaapi=true
# shellcheck disable=2089
local gn_config="
clang_base_path=\"/usr\"
custom_toolchain=\"//build/toolchain/linux/unbundle:default\"
host_toolchain=\"//build/toolchain/linux/unbundle:default\"
import(\"//electron/build/args/release.gn\")
blink_enable_generated_code_formatting=false
cc_wrapper=\"$cc_wrapper\"
chrome_pgo_phase=0
clang_base_path=\"/usr\"
clang_use_chrome_plugins=false
clang_version=\"$_llvmver\"
custom_toolchain=\"//build/toolchain/linux/unbundle:default\"
disable_fieldtrial_testing_config=true
enable_hangout_services_extension=true
enable_nacl=false
enable_nocompile_tests=false
enable_stripping=false
enable_rust=true
enable_vr=false
fatal_linker_warnings=false
ffmpeg_branding=\"Chrome\"
host_toolchain=\"//build/toolchain/linux/unbundle:default\"
icu_use_data_file=false
icu_use_data_file=true
is_cfi=false
is_clang=true
is_component_ffmpeg=true
is_debug=false
is_musl=true
is_official_build=true
symbol_level=0
treat_warnings_as_errors=false
angle_enable_gl_null=false
build_tflite_with_xnnpack=false
build_with_tflite_lib=false
disable_fieldtrial_testing_config=true
enable_hangout_services_extension=true
enable_library_cdms=false
enable_media_remoting=false
enable_nacl=false
enable_paint_preview=false
enable_reading_list=false
enable_remoting=false
enable_reporting=false
enable_screen_ai_service=false
enable_service_discovery=false
enable_stripping=false
enable_vr=false
enable_xz_extractor=false
ozone_platform_headless=false
link_pulseaudio=true
proprietary_codecs=true
rtc_link_pipewire=true
rtc_use_pipewire=true
rustc_version=\"yes\"
rust_bindgen_root=\"/usr\"
rust_sysroot_absolute=\"/usr\"
safe_browsing_use_unrar=false
symbol_level=$symbol_level
treat_warnings_as_errors=false
use_custom_libcxx=true
use_lld=true
use_pulseaudio=true
use_safe_libstdcxx=false
use_system_libffi=true
use_sysroot=false
use_thin_lto=false
use_vaapi=$vaapi
v8_enable_maglev=$maglev
skia_use_dawn=false
use_custom_libcxx=false
use_dawn=false
use_system_ada=false
use_system_cares=true
use_system_histogram=true
use_gnome_keyring=false
use_pulseaudio=true
use_sysroot=false
use_system_freetype=true
use_system_harfbuzz=true
use_system_lcms2=true
use_system_libffi=true
use_system_llhttp=true
use_system_nghttp2=true
use_system_libdrm=true
use_system_libjpeg=true
use_system_libwayland=true
use_system_wayland_scanner=true
use_system_zlib=true
use_vaapi=true
"
# shellcheck disable=2086,2090,2116
gn gen out/Release --args="$(echo $gn_config)" \
gn gen out/Release \
--args="$(echo $gn_config)" \
--export-compile-commands
}
build() {
export ELECTRON_OUT_DIR="$builddir"/out/Release/
ninja -C out/Release \
copy_node_headers \
electron_dist_zip \
node_gypi_headers \
node_version_header
node_version_header \
tar_headers
}
package() {
@ -525,7 +414,8 @@ package() {
install -Dm755 "$srcdir"/default.conf "$pkgdir"/etc/electron/default.conf
mkdir -p "$pkgdir"/usr/include/electron
cp -rv "$builddir"/out/Release/gen/node_headers "$pkgdir"/usr/include/electron
mv -v "$builddir"/out/Release/gen/node_headers "$pkgdir"/usr/include/electron
ln -sv /usr/include/electron/node_headers/include/node "$pkgdir"/usr/include/electron/node
mkdir -p "$pkgdir"/usr/include/electron/node_headers/include/nan
@ -551,40 +441,42 @@ lang() {
}
sha512sums="
a30c115f17f9811347f8713fa604b56244e39facbbab7b6b82cbc3049ed12b429b06f2faafa8a48dd727b655753848d099ba6720fae15c9bb00e6a427dc2758e electron-v35.5.0-134.0.6998.205.tar.zst
c1857b5d6975650f915f3db552666f521822b857e39958ccfb54129f3878f272deaafc3dd446bc8441a5e84f075791feeeb62841b74bb555d8c546bfe231d164 copium-134.0.tar.gz
29bb685e03356a77df5fd347cdf55194cc8b3265c421cc76e54d64edefc329dbcb052deb26b22e8f587ce68456876c071de1b7d258dd0fcc6ee66c875ec4a020 chromium-revert-drop-of-system-java.patch
d9cc4a37a0311d23ae315a8d8124f8dbf60db8cc4a3943818638174b20387f1d770d00871f6608957b246ad956abca43c22ea0b072724287f2947e1909e47323 compiler.patch
4057cc78f10bfd64092bc35a373869abb1d68b880cdbca70422f39ffd78a929c19c7728d4d4c40709aaba25581148a93ae5343e724849fd35323062ed68753fa disable-dns_config_service.patch
2470904846e3adde2c9506f9e78220daca0932320b628dd3d427bf2b7c17a8f7880cb97e787b046c28de7aca642e1a8d30824d6049905976da77e7473baa64da disable-failing-tests.patch
5fc5c012c1db6cf1ba82f38c6f3f4f5ca3a209e47ac708a74de379b018e0649b7694877c9571ef79002dde875ffc07b458a3355425f1c01867f362c66c2bc1bf fc-cache-version.patch
f7fe8a8e5eee17310fb4c3e5de621b4b89ea0567f75033c65be970950cca62b50c5fbd004735e4ad39bdb2e1638b3b73ea614203d323f8181ae18c71edf302a0 fix-opus.patch
c63dee5044353eb306a39ca1526158c0f003ab310ecb03d1c368dc2a979454590c84b8d3c15484517d5e66bb8add9b231da9abbadf2e50850abd72ac1345c4ab fstatat-32bit.patch
33ee60863cc438ef57ffef92ba4cf67a856a5ffc16138bce241bcf87e47b15154aa86918e793c26f7ec4dc62a445257ad5673ed7001daf22c4043cf6cc57da7f gdbinit.patch
36a764fa73443b47d38050b52dbe6ad2fa8d67201ff4ccdbad13b52308ef165ca046aac6f9609fe35890a6485f0f3e672e78cc41e3e44f3cdc7f145e540524e8 generic-sensor-include.patch
99bcc7dd485b404a90c606a96addab1d900852128d44fb8cea8acc7303189ef87c89a7b0e749fd0e10c5ef5f6bf1fadeb5c16a34503cab6a59938ce2653d887e musl-auxv.patch
51f1959bd622af26a1c3a1f4b0ad9a5bfa461057aa4cf9960c568dddf8ac47d55989c277f5d5ab5db040a04c54925a531af7a1cc767559218b408eaa6bdd7577 musl-sandbox.patch
e7163ac5810ac85366cef2447412287c856e3d67c6b77f219a6e5a418b1965b98e449c409424ad0704a5bded9355dd0aec3dc4585918ce5a2ab36c079707afe2 musl-tid-caching.patch
a250cff50d282b02ce0f28880d0a2b4fb8e7df51bc072bfeeddc561c29a7c76453dbcbc7b17b82966a7b30a31409d2555720d1dcf963e1b3fb8a2a06a6abcf46 no-execinfo.patch
0b41aeb6b212f9c3f61aa0a8d3085c9e865a2e68f3270ceec2376aab67f337ac46eaea7da36d3fd7219e2a1cb731b7aa2d3fb619a374d2b7653976b9f4f384bb no-mallinfo.patch
e4c4e5bc6f828f9c883dd418c0ba01887949c29c311f76206a1ec29f620b0c0ba0452949dc2778a9c46ea066405857536964a36436a68eecf7da7952736333cf no-res-ninit-nclose.patch
6dc4d8dc92e685dace62265a1ddb3aebc558aed54d20ff6d36b030be0c48d7e84662326c31363612492574d9a03c62653cdc21a60995b97dee1d75cae86a9f9b no-sandbox-settls.patch
f2b08538ff57c50b3772a07ca91845f9d45f4a5112f608b6192d4fb5d7be48f478c0c36194d95ab7bbf933e0278e5c6d578619d8643895cdc40386eebc5b975f partalloc-no-tagging-arm64.patch
03f829a2da633533ef3fd0f287f5ec602d936a97a98b53cd2415553c2537ae9d571f35397ca7c9fb3f4b0806c300e3b189569f8d979ca132e1a2a4dae7206396 pvalloc.patch
e48693e6b7aeebf69a5acbf80d9a35defe4c23835121dfeb58b051ac7c527e758a41004f4d193274fe1b01c0bfb1dbc77b09cb6a404a3fdee507a2918afb0edb temp-failure-retry.patch
1814096bc611e7f56cc5c570214dae715a4cda1fba96a6b585a73a1abc8b9161efaa799dc83887dac531dbafe9479bbe235cabe1a61cb3081e268c53a6144908 pipewire-1.4.patch
4bc087a1e5acbb0f8f884756b40c127df88699ecd6eb42c4aff6691b87239bb6915dd102e9ef2544502a12bff011859ad497206940473e6b0a0fd75afb562a4c gperf-3.2-fix.patch
858f8e3c544abf3ffe0f54ba303713e9b4058e15996c8dc10bab7c999d82e1960756325a41d6bebb3a00404efb8e51c299f61f60de4feaac6b621757ab85d329 0001-Reland-Use-global_allocator-to-provide-Rust-allocato.patch
077497c1598c7db9f4c23e000e9b86e1833de4866479fd921313543ad599e141427bf38ae687f84c3da59af68f09a776265c2a569e1a7abfa80440231baef10a 0002-Call-Rust-default-allocator-directly-from-Rust.patch
5002aa73eb19b87c702eef5b087ecb3a2679142c28cd95a5a9571aeffe24e6944497e862058ed1d609317a723cdec1678f84543235fb93f12653a92b92309efe 0003-Roll-rust-only-f7b43542838f0a4a6cfdb17fbeadf45002042.patch
d2e414135d2b046dd9efe277f88062bcb0a94749a17a014309260b1469305d55059931b9531572713c8e516897e30fd2f5317948ece1581ffe9b7b6c01078a6d 0004-Drop-remap_alloc-dep.patch
fe66228c0eefe3d08e2a7955b11e6a46f58f477befceba1628765fb016f30eb0bb02723aeedcabdb1ea3b84c42b5ea65073998e0a64f5ce082120fc7e65dee9a 0005-rust-Clean-up-build-rust-allocator-after-a-Rust-tool.patch
465107da7818b237e3c144a318ab80c3c9343b51ed38b8971ef204692d13346929becbe94cefad4c153788d3a200642143584d5ca070f6304e768ba2139c19ec electron_icon.patch
e05180199ee1d559e4e577cedd3e589844ecf40d98a86321bf1bea5607b02eeb5feb486deddae40e1005b644550331f6b8500177aa7e79bcb3750d3c1ceb76c3 electron_python-jinja-3.10.patch
2aa340854316f1284217c0ca17cbf44953684ad6c7da90815117df30928612eb9fb9ffb734b948dfc309cd25d1a67cd57f77aac2d052a3dd9aca07a3a58cbb30 electron_webpack-hash.patch
c7f57929943a86f9e5f333da9d5691da88038770eeb46dd0a0719962c934deb2879f0e7a1ed714e9383e38ee4d68eb754501f362c4d7cdee76cfc2e980b21272 electron_unbundle-node.patch
4d9287d4cdfe27fbfb7be3d4b26c0c40edbd6a0c3ff926d60f2093ca09c15bcb58e20c2ccc8c0606aafd66c6d25a54225bc329cb056d8c5b297db4c6d0e768e6 electron_system-zlib-headers.patch
e8ea87c547546011c4c8fc2de30e4f443b85cd4cfcff92808e2521d2f9ada03feefb8e1b0cf0f6b460919c146e56ef8d5ad4bb5e2461cc5247c30d92eb4d068e default.conf
d2a6de4bc851de81d9f19fdb46b023abd77e27acc33bff8fc7cec7b84d0aeda3008e00289444f4f2423b20efcc72e4c3036f809fb6ce1ad9828983bee4721ff2 electron-21.4.2.tar.xz
f19ba0c0f542115e6f53019659df256471e811a23d2f37569c9d4dfa265c0c1ace3e62c74d7507f82e6b7b4152c704e651810a00616f8f531592b14bb2af01d9 angle-wayland-include.patch
252b37a2ecc5e7a25385943045f426dc2e30991b28d206ceaff1be7fd8ffeeb024310a8fca6b3e69a4b1c57db535d51c570935351053525f393682d5ecd0f9a9 canonicalize-file-name.patch
ac0a80174f95d733f33ddc06fc88cdcf7db0973378c28d8544dc9c19e2dabeac47f91c99b3e7384f650b3405554a9e222543f0860b6acc407c078a8c9180d727 chromium-VirtualCursor-standard-layout.patch
c4654d5b23c6f5d9502507e534fe1951d6749c62251e49b6adfe10d1569431e7f7a5a6fa5ff09ec30984415ced27a5e20985df8c91295de34af3c84557fa5b91 chromium-revert-drop-of-system-java.patch
d2b5b0396173367fcf1804aaee8e7fbefce9c63ac2a91c3a1ede759cb85e567317a57e4d4f82d3ca021682fb7856e15c074276a03eda946d9b28e1cb706b07ad chromium-use-alpine-target.patch
ec04bf43278a19d4bb090eddd636ad7093c7e757cb2ffa1875971e0709761174790e109b9be03a8e299d4077a87fbd8dabd301b8754bb6fe53c055396e8af556 credentials-sys-types-header.patch
4ab8261bf95547b10df44e4d528c06a64c33602c10a1e09d531190dc8947ba6ef9e69145405b801cd364707570f791fee6d93e3bf5d57831f5a85212ddf01729 default-pthread-stacksize.patch
f011f66e5aae5a6f0d440492ee9400878b47b66214c1bc8dc1477fdd07ad0a508cdbb74415e55e27085c4e61e112e7c2ae2edfa217f2fb387e13097b24cb17b1 dns-resolver.patch
9d1edb1e0624ee61825e3af23fbb8c5dbc09d2b92d7769d19f8ca618edae8de8a3e051fedf4ad92c230e1373dc8495922c46971aef93a580c04ad80bc33516c0 fix-crashpad.patch
8bebf4a9d5225c6e47edc5b07c4b97be24a45cc221f49632836915ceeb4ecb69b7f79a31ea7f82171cde3443f45fec541f409892542cf1014e81aa6acd01566d fix-missing-cstdint-include-musl.patch
efe97b9dd2ec5965fa0cdf1b2a3c01253835c2df710da7ea105c4ce008c11f9caaf8b4321736a2b91f06d8d61972c08e225b16509dc05176a2c39337688ad5b9 fix-narrowing-cast.patch
cf73cbe5bf19d6a22157fb7aafb43f326885e852fc6292728f4ed1cd145d07ba5af51b6ec808095136cd406478aaa427ee1b9611c855fbd87976e1a91e1609bd gdbinit.patch
fa2637f92f851614347e296fbab744af2c5d7edcbb444aeb4a5d3182a8ec2549593d75e717d7e78e9b2a7257e693b48fc88c149c1591052d7ae802f4fda0a775 jsoncpp.patch
e0afb7066c2cb41aa461feb9e45e571517229deab9d06186490b527783a7ba826a4d67d3a14a33a164eea64fa561eb5b93a1d4dfd0acc2e7a9eb038e6ee273db memory-tagging-arm64.patch
fdf8ba7badbd5b61d415ad9d49c66b6ef0a6a40ec95a47e13af48711fe1bd3a5574e987929a3c486cdd02c239863b8517e7f834cecd30f156479e43a9441a18e musl-sandbox.patch
85c2842a251a3f8aa59c701ca5f2ce2f5d49b5c7e4773d5387dc597447fb47c9d876f5fb308576686c9a8abc7e35cfc172b6cdfb9f5f1dc1509329e3590b38d8 musl-tid-caching.patch
d2825aa9525fcbb53791f0ef2502c0f444a9d668f09db6ae4987b94dc4d0e6f1cf58a6e9e772ab11c896a469ca32242feb3ad9c9dbb20df9316cd74151ab0ec9 musl-v8-monotonic-pthread-cont_timedwait.patch
ebd5cf28277853dc5e984961c370ab4a6331488ae7cff45083fea0470262f56486664bc9bd7947fdd796c8635e479633c4d08cfd89270c0310f3ec21cc6642d2 no-execinfo.patch
f0bf97a80e663041e33cb0468fd8c47c5f351d6de61059ce1f359a813c40db8e247eaef294c3f562c0a8204e4f1992a918f1d879b1da9891027500e21f482b79 no-glibc-version.patch
10ae0f74a4c1db899b571508100af63e5af8d0f7c41a37fc9b7987cbf9f27f4c55894c02d6820957d7522a528929059f562f96c2f05fd6509f60c6c71d9d8256 no-mallinfo.patch
a5aa82c30402773903db1d3876208132fdef175f56ebc0ce1ee4c104a98d498d709c5565c4381736c04c238203b3c8a9cd7a5b5e69876f6afb65d7fc48df23d0 no-res-ninit-nclose.patch
2c0d7239728d98c0564ad7b81d6d243e58e56de58a21357fa30c39e333fc29c1aa98529c6e1b6fa7694169b513391ca27fa542f69b483bbde644cc2ed739bbdd no-stat-redefine.patch
5e9f6279698195467e3b506cea9be0d97ec2d970672b1b12d3d7880eec4f6f53b8f92942dc3fc6738b02889382534ce0f4310a1f94b33e21f8fbc70b85640b81 nullptr-t.patch
ad563e29ac7d83c203f5af966c4ed3ebdeb5c997835a45fb28ecde08dde5231d0a775fb413f44867af28724504c42316b27d5a6aaa602057642dcbdf7ec20a7a partition-atfork.patch
65aa0c7c9909a803e59b88ecb6d79c4db491079f3324f7bd02ee485a7bb7a81674b8f0591dab766c97070a401116db7f629fee36af7416a0fefc38f4ae0ad13d py3.11.patch
083ed731e0a3788f2cb04a3035022fbb3e4db99eba01516ea233ea3229f7d898943d8115463a48655ac83eb3cc7a48aceb8bf17c68930a5a1d83b1af95dfade8 quiche-arena-size.patch
3cf36b269e9fcfa74975d267bbf31bef68b533a51672e5ed81ae511a70f28a45206168af370961a3dab5695ddaff41cb8839c8c2fa53f22a9f3c88d207cb2996 scoped-file-no-close.patch
f2f7673f9e793dfbf4456ff8c2be785ea551c36bd512572245d04bf44da08b0133e98d85a1ffd51158009754c83121cad48d755cbc153735df2d2e73233856c0 temp-failure-retry.patch
1d4e8c6e65205e6b72af47b9a2fa6f96aaada9b7d5a74f4e11a345a885df3078b523d02aaf8e9dac3aa30d72bbbd07cd6dc7edcf44fb9ae57a7f81251c398f65 wtf-stacksize.patch
905565c10f5e5600e7d4db965c892cc45009a258e9995da958974d838ace469e1db1019195307e8807860d5b55ba6bfeea478b1f39a9b99e82c619b2816a1a22 icon.patch
e05180199ee1d559e4e577cedd3e589844ecf40d98a86321bf1bea5607b02eeb5feb486deddae40e1005b644550331f6b8500177aa7e79bcb3750d3c1ceb76c3 python-jinja-3.10.patch
26a8e4040e69f335a7104f42d012b9d933a40985b33a7be02add27a801c097c5a2be4c6e69faf9175ce8945210ae4c5592ecad2123ccff2beee5473194a765e3 system-node.patch
71571b15cf8bd6259b7fd22bea0e46b64890f3db776365de33fe539f26ce9ef99459e05c3dde9434c3657225bc67160abc915acd93033cb487c770c6a2a5975f vector-const.patch
2aa340854316f1284217c0ca17cbf44953684ad6c7da90815117df30928612eb9fb9ffb734b948dfc309cd25d1a67cd57f77aac2d052a3dd9aca07a3a58cbb30 webpack-hash.patch
08460b15037511a4e5469ceac6ae5dd4db4c8cb87c129aaaf40ba58b16c60b8a307ffdd85805efead235758abed09ec31db1ef4cf9159f7b9acdcee3031bc96c default.conf
191559fc7aa1ea0353c6fb0cc321ee1d5803a0e44848c8be941cfab96277b0de6a59962d373e2a2a1686c8f9be2bcf2d2f33706759a339a959e297d3f7fda463 electron.desktop
5f7ba5ad005f196facec1c0f26108356b64cafb1e5cfa462ff714a33b8a4c757ac00bfcb080da09eb5b65032f8eb245d9676a61ec554515d125ed63912708648 electron-launcher.sh
03750694e5e0b66f084c6e43135e60be15abb059e23486346ee4352dcc236984f2f35467b47f2b2ad46c98c22091cc2b978de8e73680febadba169d960f13f9f electron-launcher.sh
"

View file

@ -1,59 +0,0 @@
# electron
This is the `electron` package for Alpine Linux.
Please report any issues [using Gitlab](https://gitlab.alpinelinux.org/alpine/aports/-/issues/new) and tag @ayakael
## Building electron
Electron is an application framework based on `chromium`. Just like `chromium`,
and any Google application, the build process is a form of
[hostile architecture](https://en.wikipedia.org/wiki/Hostile_architecture). It is
quite literally chromium with patches applied on top, for the most part. The build
process applies a series of git patches against `chromium` from a set of patch
directories using a script.
Its source code isn't available as a downloadable tarball. It is only fetchable
using Google's `gclient`, available in `depot_tools`, with a reimplemented
version in the `teapot` package. By executing `abuild snapshot`, the tarball
can be fetched and packaged, as long as `gclient` is in your `PATH`. For ease of
maintenance, a workflow on [Ayakael's Forge](https://ayakael.net/mirrors/electron)
automatically fetches and packages the source code on new releases and makes it
available in a [generic Forgejo repository](https://ayakael.net/mirrors/-/packages/generic/electron).
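A minimal sketch of that flow, assuming `depot_tools` was cloned to `~/depot_tools` and the aports tree lives in `~/aports` (both paths are illustrative):
```sh
# Make gclient available to the APKBUILD's snapshot() function.
export PATH="$PATH:$HOME/depot_tools"

cd ~/aports/testing/electron
abuild snapshot    # fetches the chromium + electron checkout and packs the tarball
abuild checksum    # refreshes sha512sums for the newly generated source archive
```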
## Electron maintenance cycle
Security and bug fixes from upstream land randomly, but chromium security fixes land
basically weekly, around Tuesday in `America/Los_Angeles`. Minor releases only require
an upgrade of the `electron` package. It is advisable to follow chromium's weekly
security fixes, although following `electron` minor releases is fine.
Major version upgrades require a more thorough approach. For one, most changes
can be backported from the `chromium` APKBUILD by diffing the previous version
packaged with `electron` against the current one (set with the `_chromium` variable).
You also need to rebuild all `electron` apps, with patches sometimes necessary when
upstream bumps to a new `nodejs` major version. Major electron releases happen
every two `chromium` major releases, with [dates known well ahead](https://chromiumdash.appspot.com/schedule)
and a few major releases of
`electron` [officially supported at a time](https://www.electronjs.org/docs/latest/tutorial/electron-timelines).
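As a rough illustration of that backporting step (commit hashes are placeholders; the `community/chromium` path assumes the usual aports layout):
```sh
# Locate the chromium aport commits matching the old and new _chromium versions,
# then review what changed so it can be ported to electron's APKBUILD and patches.
cd ~/aports/community/chromium
git log --oneline -- APKBUILD
git diff <old-chromium-commit> <new-chromium-commit> -- .
```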
Steps, in a nutshell (see the command sketch after this list):
1. Set `pkgver` to the up-to-date version
2. Optional: fetch the source code using `abuild snapshot`, making sure `gclient`
is in your `PATH`
3. Update the source checksum using `abuild checksum`
4. If it is a major update, backport changes from the `chromium` aport and bump `pkgrel`
for all electron-based applications.
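A hedged sketch of those steps as shell commands (the editor invocation and paths are illustrative):
```sh
cd ~/aports/testing/electron
$EDITOR APKBUILD    # step 1: bump pkgver (and _chromium for major updates)
abuild snapshot     # step 2 (optional): regenerate the source tarball; needs gclient in PATH
abuild checksum     # step 3: update the sha512sums block
abuild -r           # rebuild; on major updates also bump pkgrel of electron-based aports
```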
## Why is this package still in testing
[Work is under way](https://gitlab.alpinelinux.org/alpine/aports/-/issues/15760)
to make this aport ready for `community`.
Until that happens, this package is also kept up to date against the latest
release of Alpine Linux in [Ayakael's Forge](https://ayakael.net/forge/-/packages/alpine/signal-desktop).
This is true of all Ayakael's packages still in `testing`.

View file

@ -0,0 +1,39 @@
Patch-Source: https://github.com/archlinux/svntogit-packages/blob/a353833a5a731abfaa465b658f61894a516aa49b/trunk/angle-wayland-include-protocol.patch
diff -upr third_party/angle.orig/BUILD.gn third_party/angle/BUILD.gn
--- a/third_party/angle.orig/BUILD.gn 2022-08-17 19:38:11.000000000 +0000
+++ b/third_party/angle/BUILD.gn 2022-08-18 11:04:09.061751111 +0000
@@ -489,6 +489,12 @@ config("angle_vulkan_wayland_config") {
if (angle_enable_vulkan && angle_use_wayland &&
defined(vulkan_wayland_include_dirs)) {
include_dirs = vulkan_wayland_include_dirs
+ } else if (angle_enable_vulkan && angle_use_wayland) {
+ include_dirs = [
+ "$wayland_gn_dir/src/src",
+ "$wayland_gn_dir/include/src",
+ "$wayland_gn_dir/include/protocol",
+ ]
}
}
@@ -1073,6 +1079,7 @@ if (angle_use_wayland) {
include_dirs = [
"$wayland_dir/egl",
"$wayland_dir/src",
+ "$wayland_gn_dir/include/protocol",
]
}
diff -upr third_party/angle.orig/src/third_party/volk/BUILD.gn third_party/angle/src/third_party/volk/BUILD.gn
--- a/third_party/angle.orig/src/third_party/volk/BUILD.gn 2022-08-17 19:38:12.000000000 +0000
+++ b/third_party/angle/src/third_party/volk/BUILD.gn 2022-08-18 11:04:36.499828006 +0000
@@ -21,6 +21,9 @@ source_set("volk") {
configs += [ "$angle_root:angle_no_cfi_icall" ]
public_deps = [ "$angle_vulkan_headers_dir:vulkan_headers" ]
if (angle_use_wayland) {
- include_dirs = [ "$wayland_dir/src" ]
+ include_dirs = [
+ "$wayland_dir/src",
+ "$wayland_gn_dir/include/protocol",
+ ]
}
}

View file

@ -0,0 +1,13 @@
There is no canonicalize_file_name on musl. Funnily, the file using it says it is
not portable, but it does so to avoid the non-portability of realpath(path, NULL);
--- a/third_party/nasm/config/config-linux.h
+++ b/third_party/nasm/config/config-linux.h
@@ -139,7 +139,7 @@
#define HAVE_ACCESS 1
/* Define to 1 if you have the `canonicalize_file_name' function. */
-#define HAVE_CANONICALIZE_FILE_NAME 1
+/* #define HAVE_CANONICALIZE_FILE_NAME 1 */
/* Define to 1 if you have the `cpu_to_le16' intrinsic function. */
/* #undef HAVE_CPU_TO_LE16 */

View file

@ -0,0 +1,217 @@
needed for libstdc++11 + clang only
diff --git a/sql/recover_module/btree.cc b/sql/recover_module/btree.cc
index 9ecaafe..839318a 100644
--- a/sql/recover_module/btree.cc
+++ b/sql/recover_module/btree.cc
@@ -135,16 +135,25 @@
"Move the destructor to the .cc file if it's non-trival");
#endif // !DCHECK_IS_ON()
-LeafPageDecoder::LeafPageDecoder(DatabasePageReader* db_reader) noexcept
- : page_id_(db_reader->page_id()),
- db_reader_(db_reader),
- cell_count_(ComputeCellCount(db_reader)),
- next_read_index_(0),
- last_record_size_(0) {
+void LeafPageDecoder::Initialize(DatabasePageReader* db_reader) {
+ DCHECK(db_reader);
DCHECK(IsOnValidPage(db_reader));
+ page_id_ = db_reader->page_id();
+ db_reader_ = db_reader;
+ cell_count_ = ComputeCellCount(db_reader);
+ next_read_index_ = 0;
+ last_record_size_ = 0;
DCHECK(DatabasePageReader::IsValidPageId(page_id_));
}
+void LeafPageDecoder::Reset() {
+ db_reader_ = nullptr;
+ page_id_ = 0;
+ cell_count_ = 0;
+ next_read_index_ = 0;
+ last_record_size_ = 0;
+}
+
bool LeafPageDecoder::TryAdvance() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(CanAdvance());
diff --git a/sql/recover_module/btree.h b/sql/recover_module/btree.h
index d76d076..33114b0 100644
--- a/sql/recover_module/btree.h
+++ b/sql/recover_module/btree.h
@@ -102,7 +102,7 @@
//
// |db_reader| must have been used to read an inner page of a table B-tree.
// |db_reader| must outlive this instance.
- explicit LeafPageDecoder(DatabasePageReader* db_reader) noexcept;
+ explicit LeafPageDecoder() noexcept = default;
~LeafPageDecoder() noexcept = default;
LeafPageDecoder(const LeafPageDecoder&) = delete;
@@ -150,6 +150,15 @@
// read as long as CanAdvance() returns true.
bool TryAdvance();
+ // Initialize with DatabasePageReader
+ void Initialize(DatabasePageReader* db_reader);
+
+ // Reset internal DatabasePageReader
+ void Reset();
+
+ // True if DatabasePageReader is valid
+ bool IsValid() { return (db_reader_ != nullptr); }
+
// True if the given reader may point to an inner page in a table B-tree.
//
// The last ReadPage() call on |db_reader| must have succeeded.
@@ -163,14 +172,14 @@
static int ComputeCellCount(DatabasePageReader* db_reader);
// The number of the B-tree page this reader is reading.
- const int64_t page_id_;
+ int64_t page_id_;
// Used to read the tree page.
//
// Raw pointer usage is acceptable because this instance's owner is expected
// to ensure that the DatabasePageReader outlives this.
- DatabasePageReader* const db_reader_;
+ DatabasePageReader* db_reader_;
// Caches the ComputeCellCount() value for this reader's page.
- const int cell_count_ = ComputeCellCount(db_reader_);
+ int cell_count_;
// The reader's cursor state.
//
diff --git a/sql/recover_module/cursor.cc b/sql/recover_module/cursor.cc
index 0029ff9..42548bc 100644
--- a/sql/recover_module/cursor.cc
+++ b/sql/recover_module/cursor.cc
@@ -26,7 +26,7 @@
int VirtualCursor::First() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
inner_decoders_.clear();
- leaf_decoder_ = nullptr;
+ leaf_decoder_.Reset();
AppendPageDecoder(table_->root_page_id());
return Next();
@@ -36,18 +36,18 @@
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
record_reader_.Reset();
- while (!inner_decoders_.empty() || leaf_decoder_.get()) {
- if (leaf_decoder_.get()) {
- if (!leaf_decoder_->CanAdvance()) {
+ while (!inner_decoders_.empty() || leaf_decoder_.IsValid()) {
+ if (leaf_decoder_.IsValid()) {
+ if (!leaf_decoder_.CanAdvance()) {
// The leaf has been exhausted. Remove it from the DFS stack.
- leaf_decoder_ = nullptr;
+ leaf_decoder_.Reset();
continue;
}
- if (!leaf_decoder_->TryAdvance())
+ if (!leaf_decoder_.TryAdvance())
continue;
- if (!payload_reader_.Initialize(leaf_decoder_->last_record_size(),
- leaf_decoder_->last_record_offset())) {
+ if (!payload_reader_.Initialize(leaf_decoder_.last_record_size(),
+ leaf_decoder_.last_record_offset())) {
continue;
}
if (!record_reader_.Initialize())
@@ -99,13 +99,13 @@
int64_t VirtualCursor::RowId() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(record_reader_.IsInitialized());
- DCHECK(leaf_decoder_.get());
- return leaf_decoder_->last_record_rowid();
+ DCHECK(leaf_decoder_.IsValid());
+ return leaf_decoder_.last_record_rowid();
}
void VirtualCursor::AppendPageDecoder(int page_id) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
- DCHECK(leaf_decoder_.get() == nullptr)
+ DCHECK(!leaf_decoder_.IsValid())
<< __func__
<< " must only be called when the current path has no leaf decoder";
@@ -113,7 +113,7 @@
return;
if (LeafPageDecoder::IsOnValidPage(&db_reader_)) {
- leaf_decoder_ = std::make_unique<LeafPageDecoder>(&db_reader_);
+ leaf_decoder_.Initialize(&db_reader_);
return;
}
diff --git a/sql/recover_module/cursor.h b/sql/recover_module/cursor.h
index afcd690..b15c31d 100644
--- a/sql/recover_module/cursor.h
+++ b/sql/recover_module/cursor.h
@@ -129,7 +129,7 @@
std::vector<std::unique_ptr<InnerPageDecoder>> inner_decoders_;
// Decodes the leaf page containing records.
- std::unique_ptr<LeafPageDecoder> leaf_decoder_;
+ LeafPageDecoder leaf_decoder_;
SEQUENCE_CHECKER(sequence_checker_);
};
diff --git a/sql/recover_module/pager.cc b/sql/recover_module/pager.cc
index 58e75de..5fe9620 100644
--- a/sql/recover_module/pager.cc
+++ b/sql/recover_module/pager.cc
@@ -23,8 +23,7 @@
"ints are not appropriate for representing page IDs");
DatabasePageReader::DatabasePageReader(VirtualTable* table)
- : page_data_(std::make_unique<uint8_t[]>(table->page_size())),
- table_(table) {
+ : page_data_(), table_(table) {
DCHECK(table != nullptr);
DCHECK(IsValidPageSize(table->page_size()));
}
@@ -57,8 +56,8 @@
std::numeric_limits<int64_t>::max(),
"The |read_offset| computation above may overflow");
- int sqlite_status =
- RawRead(sqlite_file, read_size, read_offset, page_data_.get());
+ int sqlite_status = RawRead(sqlite_file, read_size, read_offset,
+ const_cast<uint8_t*>(page_data_.data()));
// |page_id_| needs to be set to kInvalidPageId if the read failed.
// Otherwise, future ReadPage() calls with the previous |page_id_| value
diff --git a/sql/recover_module/pager.h b/sql/recover_module/pager.h
index 0e388ddc..99314e3 100644
--- a/sql/recover_module/pager.h
+++ b/sql/recover_module/pager.h
@@ -5,6 +5,7 @@
#ifndef SQL_RECOVER_MODULE_PAGER_H_
#define SQL_RECOVER_MODULE_PAGER_H_
+#include <array>
#include <cstdint>
#include <memory>
@@ -70,7 +71,7 @@
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK_NE(page_id_, kInvalidPageId)
<< "Successful ReadPage() required before accessing pager state";
- return page_data_.get();
+ return page_data_.data();
}
// The number of bytes in the page read by the last ReadPage() call.
@@ -137,7 +138,7 @@
int page_id_ = kInvalidPageId;
// Stores the bytes of the last page successfully read by ReadPage().
// The content is undefined if the last call to ReadPage() did not succeed.
- const std::unique_ptr<uint8_t[]> page_data_;
+ const std::array<uint8_t, kMaxPageSize> page_data_;
// Raw pointer usage is acceptable because this instance's owner is expected
// to ensure that the VirtualTable outlives this.
VirtualTable* const table_;

View file

@ -1,6 +1,4 @@
This was dropped for some reason in 6951c37cecd05979b232a39e5c10e6346a0f74ef.
It allows using /usr/bin/java instead of a downloaded one (which doesn't work on musl).
--
--- a/third_party/closure_compiler/compiler.py 2021-05-20 04:17:53.000000000 +0200
+++ b/third_party/closure_compiler/compiler.py 2021-05-20 04:17:53.000000000 +0200
@@ -13,8 +13,9 @@

View file

@ -0,0 +1,13 @@
--- a/build/config/compiler/BUILD.gn
+++ b/build/config/compiler/BUILD.gn
@@ -917,8 +917,8 @@
} else if (current_cpu == "arm64") {
if (is_clang && !is_android && !is_nacl && !is_fuchsia &&
!(is_chromeos_lacros && is_chromeos_device)) {
- cflags += [ "--target=aarch64-linux-gnu" ]
- ldflags += [ "--target=aarch64-linux-gnu" ]
+ cflags += [ "--target=aarch64-alpine-linux-musl" ]
+ ldflags += [ "--target=aarch64-alpine-linux-musl" ]
}
if (is_android) {
# Outline atomics crash on Exynos 9810. http://crbug.com/1272795

View file

@ -1,161 +0,0 @@
--- ./build/config/compiler/BUILD.gn.orig
+++ ./build/config/compiler/BUILD.gn
@@ -591,24 +591,6 @@
}
}
- # TODO(crbug.com/40283598): This causes binary size growth and potentially
- # other problems.
- # TODO(crbug.com/40284925): This isn't supported by Cronet's mainline llvm version.
- if (default_toolchain != "//build/toolchain/cros:target" &&
- !llvm_android_mainline) {
- cflags += [
- "-mllvm",
- "-split-threshold-for-reg-with-hint=0",
- ]
- if (use_thin_lto && is_a_target_toolchain) {
- if (is_win) {
- ldflags += [ "-mllvm:-split-threshold-for-reg-with-hint=0" ]
- } else {
- ldflags += [ "-Wl,-mllvm,-split-threshold-for-reg-with-hint=0" ]
- }
- }
- }
-
# TODO(crbug.com/40192287): Investigate why/if this should be needed.
if (is_win) {
cflags += [ "/clang:-ffp-contract=off" ]
@@ -1045,20 +1027,6 @@
# `-nodefaultlibs` from the linker invocation from Rust, which would be used
# to compile dylibs on Android, such as for constructing unit test APKs.
"-Cdefault-linker-libraries",
-
- # To make Rust .d files compatible with ninja
- "-Zdep-info-omit-d-target",
-
- # If a macro panics during compilation, show which macro and where it is
- # defined.
- "-Zmacro-backtrace",
-
- # For deterministic builds, keep the local machine's current working
- # directory from appearing in build outputs.
- "-Zremap-cwd-prefix=.",
-
- # We use clang-rt sanitizer runtimes.
- "-Zexternal-clangrt",
]
if (!is_win || force_rustc_color_output) {
@@ -1107,7 +1075,6 @@
# Don't allow unstable features to be enabled by `#![feature()]` without
# additional command line flags.
config("disallow_unstable_features") {
- rustflags = [ "-Zallow-features=" ]
}
config("libcxx_hardening") {
@@ -1242,8 +1209,8 @@
}
} else if (current_cpu == "arm") {
if (is_clang && !is_android && !is_nacl && !is_chromeos_device) {
- cflags += [ "--target=arm-linux-gnueabihf" ]
- ldflags += [ "--target=arm-linux-gnueabihf" ]
+ cflags += [ "--target=armv7-alpine-linux-musleabihf" ]
+ ldflags += [ "--target=armv7-alpine-linux-musleabihf" ]
}
if (!is_nacl) {
cflags += [
@@ -1257,8 +1224,8 @@
} else if (current_cpu == "arm64") {
if (is_clang && !is_android && !is_nacl && !is_fuchsia &&
!is_chromeos_device) {
- cflags += [ "--target=aarch64-linux-gnu" ]
- ldflags += [ "--target=aarch64-linux-gnu" ]
+ cflags += [ "--target=aarch64-alpine-linux-musl" ]
+ ldflags += [ "--target=aarch64-alpine-linux-musl" ]
}
} else if (current_cpu == "mipsel" && !is_nacl) {
ldflags += [ "-Wl,--hash-style=sysv" ]
@@ -2086,7 +2053,7 @@
defines = [ "_HAS_NODISCARD" ]
}
} else {
- cflags = [ "-Wall" ]
+ cflags = []
if (is_clang) {
# Enable extra warnings for chromium_code when we control the compiler.
cflags += [ "-Wextra" ]
--- ./build/config/rust.gni.orig
+++ ./build/config/rust.gni
@@ -198,13 +198,13 @@
rust_abi_target = ""
if (is_linux || is_chromeos) {
if (current_cpu == "arm64") {
- rust_abi_target = "aarch64-unknown-linux-gnu"
+ rust_abi_target = "aarch64-alpine-linux-musl"
cargo_target_abi = ""
} else if (current_cpu == "x86") {
- rust_abi_target = "i686-unknown-linux-gnu"
+ rust_abi_target = "i586-alpine-linux-musl"
cargo_target_abi = ""
} else if (current_cpu == "x64") {
- rust_abi_target = "x86_64-unknown-linux-gnu"
+ rust_abi_target = "x86_64-alpine-linux-musl"
cargo_target_abi = ""
} else if (current_cpu == "arm") {
if (arm_float_abi == "hard") {
@@ -214,18 +214,18 @@
}
if (arm_arch == "armv7-a" || arm_arch == "armv7") {
# No way to inform Rust about the -a suffix.
- rust_abi_target = "armv7-unknown-linux-gnueabi" + float_suffix
+ rust_abi_target = "armv7-alpine-linux-musleabi" + float_suffix
cargo_target_abi = "eabi" + float_suffix
} else {
- rust_abi_target = "arm-unknown-linux-gnueabi" + float_suffix
+ rust_abi_target = "armv6-alpine-linux-musleabi" + float_suffix
cargo_target_abi = "eabi" + float_suffix
}
} else if (current_cpu == "riscv64") {
- rust_abi_target = "riscv64gc-unknown-linux-gnu"
+ rust_abi_target = "riscv64-alpine-linux-musl"
cargo_target_abi = ""
} else {
# Best guess for other future platforms.
- rust_abi_target = current_cpu + "-unknown-linux-gnu"
+ rust_abi_target = current_cpu + "-alpine-linux-musl"
cargo_target_abi = ""
}
} else if (is_android) {
--- ./build/config/clang/BUILD.gn.orig
+++ ./build/config/clang/BUILD.gn
@@ -128,14 +128,15 @@
} else if (is_apple) {
_dir = "darwin"
} else if (is_linux || is_chromeos) {
+ _dir = "linux"
if (current_cpu == "x64") {
- _dir = "x86_64-unknown-linux-gnu"
+ _suffix = "-x86_64"
} else if (current_cpu == "x86") {
- _dir = "i386-unknown-linux-gnu"
+ _suffix = "-i386"
} else if (current_cpu == "arm") {
- _dir = "armv7-unknown-linux-gnueabihf"
+ _suffix = "-armhf"
} else if (current_cpu == "arm64") {
- _dir = "aarch64-unknown-linux-gnu"
+ _suffix = "-aarch64"
} else {
assert(false) # Unhandled cpu type
}
--- ./build/config/gcc/BUILD.gn.orig
+++ ./build/config/gcc/BUILD.gn
@@ -32,7 +32,6 @@
# See http://gcc.gnu.org/wiki/Visibility
config("symbol_visibility_hidden") {
cflags = [ "-fvisibility=hidden" ]
- rustflags = [ "-Zdefault-visibility=hidden" ]
# Visibility attribute is not supported on AIX.
if (current_os != "aix") {

View file

@ -0,0 +1,11 @@
--- a/sandbox/linux/services/credentials.h
+++ b/sandbox/linux/services/credentials.h
@@ -14,6 +14,8 @@
#include <string>
#include <vector>
+#include <sys/types.h>
+
#include "sandbox/linux/system_headers/capability.h"
#include "sandbox/sandbox_export.h"

View file

@ -0,0 +1,45 @@
--- a/base/threading/platform_thread_linux.cc
+++ b/base/threading/platform_thread_linux.cc
@@ -186,7 +186,8 @@
size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
#if !defined(THREAD_SANITIZER)
- return 0;
+ // use 2mb to avoid running out of space. This is what android uses
+ return 2 * (1 << 20);
#else
// ThreadSanitizer bloats the stack heavily. Evidence has been that the
// default stack size isn't enough for some browser tests.
--- a/base/threading/platform_thread_unittest.cc
+++ b/base/threading/platform_thread_unittest.cc
@@ -420,7 +420,7 @@
((BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && \
!defined(THREAD_SANITIZER)) || \
(BUILDFLAG(IS_ANDROID) && !defined(ADDRESS_SANITIZER))
- EXPECT_EQ(0u, stack_size);
+ EXPECT_EQ(2u << 20, stack_size);
#else
EXPECT_GT(stack_size, 0u);
EXPECT_LT(stack_size, 20u * (1 << 20));
--- a/chrome/browser/shutdown_signal_handlers_posix.cc
+++ b/chrome/browser/shutdown_signal_handlers_posix.cc
@@ -187,11 +187,19 @@
g_shutdown_pipe_read_fd = pipefd[0];
g_shutdown_pipe_write_fd = pipefd[1];
#if !defined(ADDRESS_SANITIZER)
+# if defined(__GLIBC__)
const size_t kShutdownDetectorThreadStackSize = PTHREAD_STACK_MIN * 2;
+# else
+ const size_t kShutdownDetectorThreadStackSize = PTHREAD_STACK_MIN * 2 * 8; // match up musls 2k PTHREAD_STACK_MIN with glibcs 16k
+# endif
#else
+# if defined(__GLIBC__)
// ASan instrumentation bloats the stack frames, so we need to increase the
// stack size to avoid hitting the guard page.
const size_t kShutdownDetectorThreadStackSize = PTHREAD_STACK_MIN * 4;
+# else
+ const size_t kShutdownDetectorThreadStackSize = PTHREAD_STACK_MIN * 4 * 8; // match up musls 2k PTHREAD_STACK_MIN with glibcs 16k
+# endif
#endif
ShutdownDetector* detector = new ShutdownDetector(
g_shutdown_pipe_read_fd, std::move(shutdown_callback), task_runner);

View file

@ -2,8 +2,4 @@
# the electron launcher.
# Options to pass to electron.
ELECTRON_FLAGS="--enable-features=WebRTCPipeWireCapturer"
# This can be 'x11', 'wayland', or 'auto'. Overriding default to 'auto',
# but respecting the variable content if any
ELECTRON_OZONE_PLATFORM_HINT="${ELECTRON_OZONE_PLATFORM_HINT:-auto}"
ELECTRON_FLAGS="--ozone-platform-hint=auto"

View file

@ -1,15 +0,0 @@
diff --git a/net/dns/BUILD.gn b/net/dns/BUILD.gn
index f36bf68..805d9a6 100644
--- a/net/dns/BUILD.gn
+++ b/net/dns/BUILD.gn
@@ -130,8 +130,8 @@ source_set("dns") {
]
} else if (is_linux) {
sources += [
- "dns_config_service_linux.cc",
- "dns_config_service_linux.h",
+ "dns_config_service_fuchsia.cc",
+ "dns_config_service_fuchsia.h",
]
} else if (is_posix) {
sources += [

View file

@ -1,343 +0,0 @@
safesprintf emitnull:
error: conversion from 'std::nullptr_t' to 'const internal::Arg' is ambiguous
const internal::Arg arg_array[] = { args... };
flatmap incompletetype:
error: static assertion failed due to requirement 'std::__is_complete_or_unbounded(std::__type_identity<std::pair<A, A>>{})': template argument must be a complete class or an unbounded array
static_assert(std::__is_complete_or_unbounded(__type_identity<_Tp>{}),
i18n, time:
various icu failures (new icu time formatting? internal api difference?)
a ton of these fail:
Expected equality of these values:
u"Monday 16 May Saturday 28 May"
Which is: u"Monday 16 May \x2013 Saturday 28 May"
DateIntervalFormat(begin_time, end_time, DATE_FORMAT_MONTH_WEEKDAY_DAY)
Which is: u"Monday 16\x2009\x2013\x2009Saturday 28 May"
../../base/i18n/time_formatting_unittest.cc:84: Failure
Expected equality of these values:
clock12h_pm
Which is: u"3:42 PM"
TimeFormatTimeOfDay(time)
Which is: u"3:42\x202FPM"
.. and so on
fileutiltest filetofile:
../../base/files/file_util_unittest.cc:2692: Failure
Value of: stream
Actual: true
Expected: false
stacktracetest: crashes (this doesn't seem to use execinfo so probably relies on glibc internal layout for tracing here)
platformthreadtest canchangethreadtype:
../../base/threading/platform_thread_unittest.cc:445: Failure
Expected equality of these values:
PlatformThread::CanChangeThreadType(ThreadType::kBackground, ThreadType::kResourceEfficient)
Which is: true
kCanIncreasePriority
Which is: false
scopedfdownershiptrackingtest crashonunownedclose: fails due to scoped-file-no-close.patch
stackcontainer customallocator:
../../base/containers/stack_container_unittest.cc:211: Failure
Expected equality of these values:
1
Allocator::deallocated
Which is: 0
nativelibrarytest loadlibrarypreferownsymbols: crashes (probably musl dlopen does not play nice here)
spantest empty: crashes (this looks fishy)
readelfbuildid: crashes (this looks like glibc dynamic linker semantics)
nss db unittest: various nss failures: e.g.:
../../net/cert/nss_cert_database_unittest.cc:209: Failure
Expected equality of these values:
OK
Which is: 0
cert_db_->ImportFromPKCS12(GetPublicSlot(), pkcs12_data, u"12345", true, nullptr)
Which is: -702
processutiltest cloneflags: fails in CI (ulimit? too many threads?)
../../base/process/process_util_unittest.cc:1434: Failure
Value of: process.IsValid()
Actual: false
Expected: true
addresstrackerlinuxnetlinktest:
../../net/base/address_tracker_linux_unittest.cc:886: Failure
Value of: child.process.IsValid()
Actual: false
Expected: true
ToAddressDoesNotDereference: ; Expected `get_for_extraction_cnt` to be 1 but got 0;
DataCapturedManyThreads: flaky
ProcessAlternativeServicesTest.Process*: crashed ?
--- a/base/strings/safe_sprintf_unittest.cc
+++ b/base/strings/safe_sprintf_unittest.cc
@@ -740,6 +740,7 @@
#endif
}
+#if 0
TEST(SafeSPrintfTest, EmitNULL) {
char buf[40];
#if defined(__GNUC__)
@@ -756,6 +757,7 @@
#pragma GCC diagnostic pop
#endif
}
+#endif
TEST(SafeSPrintfTest, PointerSize) {
// The internal data representation is a 64bit value, independent of the
--- a/base/containers/flat_map_unittest.cc
+++ b/base/containers/flat_map_unittest.cc
@@ -52,6 +52,7 @@
} // namespace
+#if 0
TEST(FlatMap, IncompleteType) {
struct A {
using Map = flat_map<A, A>;
@@ -65,6 +66,7 @@
A a;
}
+#endif
TEST(FlatMap, RangeConstructor) {
flat_map<int, int>::value_type input_vals[] = {
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -3194,21 +3194,6 @@
"hash/md5_constexpr_unittest.cc",
"hash/md5_unittest.cc",
"hash/sha1_unittest.cc",
- "i18n/break_iterator_unittest.cc",
- "i18n/case_conversion_unittest.cc",
- "i18n/char_iterator_unittest.cc",
- "i18n/character_encoding_unittest.cc",
- "i18n/file_util_icu_unittest.cc",
- "i18n/icu_string_conversions_unittest.cc",
- "i18n/icu_util_unittest.cc",
- "i18n/message_formatter_unittest.cc",
- "i18n/number_formatting_unittest.cc",
- "i18n/rtl_unittest.cc",
- "i18n/streaming_utf8_validator_unittest.cc",
- "i18n/string_search_unittest.cc",
- "i18n/time_formatting_unittest.cc",
- "i18n/timezone_unittest.cc",
- "i18n/transliterator_unittest.cc",
"immediate_crash_unittest.cc",
"json/json_parser_unittest.cc",
"json/json_reader_unittest.cc",
--- a/base/files/file_util_unittest.cc
+++ b/base/files/file_util_unittest.cc
@@ -2686,6 +2686,7 @@
}
}
+#if 0
TEST_F(FileUtilTest, FileToFILE) {
File file;
FILE* stream = FileToFILE(std::move(file), "w");
@@ -2700,6 +2701,7 @@
EXPECT_FALSE(file.IsValid());
EXPECT_TRUE(CloseFile(stream));
}
+#endif
TEST_F(FileUtilTest, FILEToFile) {
ScopedFILE stream;
--- a/base/threading/platform_thread_unittest.cc
+++ b/base/threading/platform_thread_unittest.cc
@@ -416,6 +416,7 @@
// platforms for all priorities. This not being the case. This test documents
// and hardcodes what we know. Please inform scheduler-dev@chromium.org if this
// proprerty changes for a given platform.
+#if 0
TEST(PlatformThreadTest, CanChangeThreadType) {
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
// On Ubuntu, RLIMIT_NICE and RLIMIT_RTPRIO are 0 by default, so we won't be
@@ -472,6 +473,7 @@
ThreadType::kBackground));
#endif
}
+#endif
TEST(PlatformThreadTest, SetCurrentThreadTypeTest) {
TestPriorityResultingFromThreadType(ThreadType::kBackground,
--- a/base/files/scoped_file_linux_unittest.cc
+++ b/base/files/scoped_file_linux_unittest.cc
@@ -42,11 +42,13 @@
EXPECT_DEATH(ScopedFD(fd.get()), "");
}
+#if 0
TEST_F(ScopedFDOwnershipTrackingTest, CrashOnUnownedClose) {
ScopedFD fd = OpenFD();
subtle::EnableFDOwnershipEnforcement(true);
EXPECT_DEATH(close(fd.get()), "");
}
+#endif
#endif // defined(GTEST_HAS_DEATH_TEST)
--- a/base/native_library_unittest.cc
+++ b/base/native_library_unittest.cc
@@ -139,6 +139,7 @@
// Verifies that the |prefer_own_symbols| option satisfies its guarantee that
// a loaded library will always prefer local symbol resolution before
// considering global symbols.
+#if 0
TEST(NativeLibraryTest, LoadLibraryPreferOwnSymbols) {
NativeLibraryOptions options;
options.prefer_own_symbols = true;
@@ -171,6 +172,7 @@
EXPECT_EQ(2, NativeLibraryTestIncrement());
EXPECT_EQ(3, NativeLibraryTestIncrement());
}
+#endif
#endif // !BUILDFLAG(IS_ANDROID) && !defined(THREAD_SANITIZER) && \
// !defined(MEMORY_SANITIZER)
--- a/base/containers/span_unittest.cc
+++ b/base/containers/span_unittest.cc
@@ -995,6 +995,7 @@
}
}
+#if 0
TEST(SpanTest, Empty) {
{
span<int> span;
@@ -1014,6 +1015,7 @@
EXPECT_TRUE(span_of_checked_iterators.empty());
}
}
+#endif
TEST(SpanTest, OperatorAt) {
static constexpr int kArray[] = {1, 6, 1, 8, 0};
--- a/base/debug/elf_reader_unittest.cc
+++ b/base/debug/elf_reader_unittest.cc
@@ -194,6 +194,7 @@
}
}
+#if 0
TEST(ElfReaderTestWithCurrentImage, ReadElfBuildId) {
#if BUILDFLAG(IS_ANDROID)
// On Android the library loader memory maps the full so file.
@@ -229,6 +230,7 @@
UnloadNativeLibrary(library);
#endif
}
+#endif
} // namespace debug
} // namespace base
--- a/net/BUILD.gn
+++ b/net/BUILD.gn
@@ -4826,7 +4826,6 @@
sources += [
"cert/internal/system_trust_store_nss_unittest.cc",
"cert/internal/trust_store_nss_unittest.cc",
- "cert/nss_cert_database_unittest.cc",
"cert/x509_util_nss_unittest.cc",
]
if (!is_castos) {
--- a/base/process/process_util_unittest.cc
+++ b/base/process/process_util_unittest.cc
@@ -1419,7 +1419,7 @@
return kSuccess;
}
-#if defined(CLONE_NEWUSER) && defined(CLONE_NEWPID)
+#if 0 && defined(CLONE_NEWUSER) && defined(CLONE_NEWPID)
TEST_F(ProcessUtilTest, CloneFlags) {
if (!PathExists(FilePath("/proc/self/ns/user")) ||
!PathExists(FilePath("/proc/self/ns/pid"))) {
--- a/net/base/address_tracker_linux_unittest.cc
+++ b/net/base/address_tracker_linux_unittest.cc
@@ -831,6 +831,7 @@
//
// This test creates multiple concurrent `AddressTrackerLinux` instances in
// separate processes, each in their own PID namespaces.
+#if 0
TEST(AddressTrackerLinuxNetlinkTest, TestInitializeTwoTrackersInPidNamespaces) {
// This test initializes `kNumChildren` instances of `AddressTrackerLinux` in
// tracking mode, each in their own child process running in a PID namespace.
@@ -901,6 +902,7 @@
ASSERT_EQ(exit_code, 0);
}
}
+#endif
MULTIPROCESS_TEST_MAIN(ChildProcessInitializeTrackerForTesting) {
base::test::TaskEnvironment task_env(
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -1368,6 +1368,7 @@
}
// Test that data sent from multiple threads is gathered
+#if 0
TEST_F(TraceEventTestFixture, DataCapturedManyThreads) {
BeginTrace();
@@ -1408,6 +1409,7 @@
delete task_complete_events[i];
}
}
+#endif
// Test that thread and process names show up in the trace.
// In SDK build, thread names are not tracked inside //base. Instead, there's
--- a/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_unittest.cc
+++ b/base/allocator/partition_allocator/src/partition_alloc/pointers/raw_ptr_unittest.cc
@@ -1481,6 +1481,7 @@
// `base::to_address()` will use the dereference operator. This is not
// what we want; this test enforces extraction semantics for
// `to_address()`.
+#if 0
TEST_F(RawPtrTest, ToAddressDoesNotDereference) {
CountingRawPtr<int> ptr = nullptr;
int* raw = base::to_address(ptr);
@@ -1492,6 +1493,7 @@
.get_for_duplication_cnt = 0}),
CountersMatch());
}
+#endif
TEST_F(RawPtrTest, ToAddressGivesBackRawAddress) {
int* raw = nullptr;
--- a/net/http/http_stream_factory_unittest.cc
+++ b/net/http/http_stream_factory_unittest.cc
@@ -3477,6 +3477,7 @@
DefaultCTPolicyEnforcer ct_policy_enforcer_;
};
+#if 0
TEST_F(ProcessAlternativeServicesTest, ProcessEmptyAltSvc) {
session_ =
std::make_unique<HttpNetworkSession>(session_params_, session_context_);
@@ -3585,6 +3586,7 @@
alternatives[0].host_port_pair());
EXPECT_EQ(0u, alternatives[0].advertised_versions().size());
}
+#endif
} // namespace

View file

@ -0,0 +1,36 @@
--- a/net/dns/host_resolver_manager.cc
+++ b/net/dns/host_resolver_manager.cc
@@ -3014,8 +3014,7 @@
NetworkChangeNotifier::AddConnectionTypeObserver(this);
if (system_dns_config_notifier_)
system_dns_config_notifier_->AddObserver(this);
-#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_OPENBSD) && \
- !BUILDFLAG(IS_ANDROID)
+#if defined(__GLIBC__)
EnsureDnsReloaderInit();
#endif
--- a/net/dns/dns_reloader.cc
+++ b/net/dns/dns_reloader.cc
@@ -6,8 +6,7 @@
#include "build/build_config.h"
-#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_OPENBSD) && \
- !BUILDFLAG(IS_ANDROID) && !BUILDFLAG(IS_FUCHSIA)
+#if defined(__GLIBC__)
#include <resolv.h>
--- a/net/dns/host_resolver_proc.cc
+++ b/net/dns/host_resolver_proc.cc
@@ -176,8 +176,7 @@
base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
base::BlockingType::WILL_BLOCK);
-#if BUILDFLAG(IS_POSIX) && \
- !(BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_OPENBSD) || BUILDFLAG(IS_ANDROID))
+#if defined(__GLIBC__)
DnsReloaderMaybeReload();
#endif
absl::optional<AddressInfo> ai;

View file

@ -9,12 +9,6 @@ done
# Prefer user defined ELECTRON_USER_FLAGS (from env) over system
# default ELECTRON_FLAGS (from /etc/electron/default.conf).
export ELECTRON_FLAGS="$ELECTRON_FLAGS ${ELECTRON_USER_FLAGS:-"$ELECTRON_USER_FLAGS"}"
# Re-export, for it to be accessible by the process
export ELECTRON_OZONE_PLATFORM_HINT="${ELECTRON_OZONE_PLATFORM_HINT}"
if [ "$ELECTRON_RUN_AS_NODE" == "1" ] && [ "$ELECTRON_STILL_PASS_THE_DEFAULT_FLAGS" != "1" ]; then
exec "/usr/lib/electron/electron" "$@"
fi
ELECTRON_FLAGS=${ELECTRON_USER_FLAGS:-"$ELECTRON_FLAGS"}
exec "/usr/lib/electron/electron" "$@" ${ELECTRON_FLAGS}

View file

@ -1,12 +0,0 @@
diff --git a/electron/BUILD.gn.orig b/electron/BUILD.gn
index 235c7abd3e8..088c24ac45e 100644
--- a/electron/BUILD.gn.orig
+++ b/electron/BUILD.gn
@@ -1569,7 +1569,6 @@ group("copy_node_headers") {
":generate_node_headers",
":node_gypi_headers",
":node_version_header",
- ":zlib_headers",
]
}

View file

@ -1,143 +0,0 @@
diff --git a/electron/script/generate-config-gypi.py.orig b/electron/script/generate-config-gypi.py
index 58c973b..c215d90 100755
--- a/electron/script/generate-config-gypi.py.orig
+++ b/electron/script/generate-config-gypi.py
@@ -64,6 +64,11 @@ def main(target_file, target_cpu):
# in common.gypi
if 'clang' in v:
del v['clang']
+
+ with open(os.path.join(NODE_DIR, 'use_system.txt')) as f:
+ for dep in f.read().strip().split(' '):
+ if v.get(f'node_shared_{dep}') is not None:
+ v[f'node_shared_{dep}'] = 'true'
with open(target_file, 'w+', encoding='utf-8') as file_out:
file_out.write(pprint.pformat(config, indent=2))
diff --git a/third_party/electron_node/node.gni.orig b/third_party/electron_node/node.gni
index 73bf383..1c80d5a 100644
--- a/third_party/electron_node/node.gni.orig
+++ b/third_party/electron_node/node.gni
@@ -73,6 +73,7 @@ declare_args() {
node_use_amaro = true
# Allows downstream packagers (eg. Linux distributions) to build against system shared libraries.
+ use_system_ada = false
use_system_cares = false
use_system_nghttp2 = false
use_system_llhttp = false
diff --git a/third_party/electron_node/unofficial.gni.orig b/third_party/electron_node/unofficial.gni
index d61a9bd..8bf990e 100644
--- a/third_party/electron_node/unofficial.gni.orig
+++ b/third_party/electron_node/unofficial.gni
@@ -143,7 +143,6 @@ template("node_gn_build") {
"deps/googletest:googletest_config",
]
public_deps = [
- "deps/ada",
"deps/uv",
"//electron:electron_js2c",
"deps/simdjson",
@@ -151,10 +150,7 @@ template("node_gn_build") {
]
deps = [
":run_node_js2c",
- "deps/cares",
- "deps/histogram",
"deps/nbytes",
- "deps/nghttp2",
"deps/postject",
"deps/sqlite",
"deps/uvwasi",
@@ -182,12 +178,30 @@ template("node_gn_build") {
if (is_posix) {
configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
configs += [ "//build/config/gcc:symbol_visibility_default" ]
+ libs = []
+ include_dirs = []
}
if (use_system_llhttp) {
libs += [ "llhttp" ]
} else {
deps += [ "deps/llhttp" ]
}
+ if (use_system_cares) {
+ libs += [ "cares" ]
+ } else {
+ deps += [ "deps/cares" ]
+ }
+ if (use_system_nghttp2) {
+ libs += [ "nghttp2" ]
+ } else {
+ deps += [ "deps/nghttp2" ]
+ }
+ if (use_system_ada) {
+ libs += [ "ada" ]
+ include_dirs += [ "/usr/include/ada" ]
+ } else {
+ public_deps += [ "deps/ada" ]
+ }
if (use_system_histogram) {
libs += [ "hdr_histogram" ]
include_dirs += [ "/usr/include/hdr" ]
@@ -208,7 +222,7 @@ template("node_gn_build") {
"src/inspector:node_protocol_generated_sources",
"src/inspector:v8_inspector_compress_protocol_json",
]
- include_dirs = [
+ include_dirs += [
"$target_gen_dir/src",
"$target_gen_dir/src/inspector",
"$node_inspector_protocol_path",
@@ -222,17 +236,18 @@ template("node_gn_build") {
sources += node_inspector.node_inspector_sources +
node_inspector.node_inspector_generated_sources
}
- if (is_linux) {
- import("//build/config/linux/pkg_config.gni")
- if (use_system_cares) {
- pkg_config("cares") {
- packages = [ "libcares" ]
- }
- }
- if (use_system_nghttp2) {
- pkg_config("nghttp2") {
- packages = [ "libnghttp2" ]
- }
+ }
+
+ if (is_linux) {
+ import("//build/config/linux/pkg_config.gni")
+ if (use_system_cares) {
+ pkg_config("cares") {
+ packages = [ "libcares" ]
+ }
+ }
+ if (use_system_nghttp2) {
+ pkg_config("nghttp2") {
+ packages = [ "libnghttp2" ]
}
}
}
diff --git a/third_party/electron_node/unofficial.gni.orig b/third_party/electron_node/unofficial.gni
index 6bcc40b..7e383b2 100644
--- a/third_party/electron_node/unofficial.gni.orig
+++ b/third_party/electron_node/unofficial.gni
@@ -142,7 +142,6 @@ template("node_gn_build") {
public_configs = [
":node_external_config",
"deps/googletest:googletest_config",
- ":zstd_include_config"
]
public_deps = [
"deps/ada",
@@ -163,8 +162,6 @@ template("node_gn_build") {
"//third_party/zlib",
"//third_party/brotli:dec",
"//third_party/brotli:enc",
- "//third_party/zstd:decompress",
- "//third_party/zstd:headers",
"$node_simdutf_path",
"$node_v8_path:v8_libplatform",
]

View file

@ -1,13 +0,0 @@
instead of hardcoding the fontconfig cache version, use the FC_CACHE_VERSION macro that fontconfig defines.
--
--- a/third_party/test_fonts/fontconfig/generate_fontconfig_caches.cc
+++ b/third_party/test_fonts/fontconfig/generate_fontconfig_caches.cc
@@ -56,7 +56,7 @@
FcFini();
// Check existence of intended fontconfig cache file.
- auto cache = fontconfig_caches + "/" + kCacheKey + "-le64.cache-9";
+ auto cache = fontconfig_caches + "/" + kCacheKey + "-le64.cache-" + FC_CACHE_VERSION;
bool cache_exists = access(cache.c_str(), F_OK) == 0;
return !cache_exists;
}

View file

@ -0,0 +1,31 @@
--- a/third_party/crashpad/crashpad/client/BUILD.gn
+++ b/third_party/crashpad/crashpad/client/BUILD.gn
@@ -81,6 +81,7 @@
deps = [
":common",
"$mini_chromium_source_parent:chromeos_buildflags",
+ "../util",
]
if (crashpad_is_win) {
--- a/third_party/crashpad/crashpad/util/linux/ptracer.cc
+++ b/third_party/crashpad/crashpad/util/linux/ptracer.cc
@@ -26,6 +26,7 @@
#if defined(ARCH_CPU_X86_FAMILY)
#include <asm/ldt.h>
+#include <asm/ptrace-abi.h>
#endif
namespace crashpad {
--- a/third_party/crashpad/crashpad/util/linux/thread_info.h
+++ b/third_party/crashpad/crashpad/util/linux/thread_info.h
@@ -273,7 +273,7 @@ union FloatContext {
"Size mismatch");
#elif defined(ARCH_CPU_ARMEL)
static_assert(sizeof(f32_t::fpregs) == sizeof(user_fpregs), "Size mismatch");
-#if !defined(__GLIBC__)
+#if defined(OS_ANDROID)
static_assert(sizeof(f32_t::vfp) == sizeof(user_vfp), "Size mismatch");
#endif
#elif defined(ARCH_CPU_ARM64)

View file

@ -0,0 +1,11 @@
Patch-Source: https://github.com/void-linux/void-packages/blob/378db3cf5087877588aebaaa8ca3c9d94dfb54e0/srcpkgs/chromium/patches/fix-missing-cstdint-include-musl.patch
--- a/net/third_party/quiche/src/quiche/http2/adapter/window_manager.h
+++ b/net/third_party/quiche/src/quiche/http2/adapter/window_manager.h
@@ -3,6 +3,7 @@
#include <stddef.h>
+#include <cstdint>
#include <functional>
#include "common/platform/api/quiche_export.h"

View file

@ -0,0 +1,44 @@
--- a/base/files/file_util_linux.cc
+++ b/base/files/file_util_linux.cc
@@ -30,7 +30,7 @@
case EXT2_SUPER_MAGIC: // Also ext3 and ext4
case MSDOS_SUPER_MAGIC:
case REISERFS_SUPER_MAGIC:
- case static_cast<int>(BTRFS_SUPER_MAGIC):
+ case BTRFS_SUPER_MAGIC:
case 0x5346544E: // NTFS
case 0x58465342: // XFS
case 0x3153464A: // JFS
@@ -40,14 +40,14 @@
*type = FILE_SYSTEM_NFS;
break;
case SMB_SUPER_MAGIC:
- case static_cast<int>(0xFF534D42): // CIFS
+ case 0xFF534D42: // CIFS
*type = FILE_SYSTEM_SMB;
break;
case CODA_SUPER_MAGIC:
*type = FILE_SYSTEM_CODA;
break;
- case static_cast<int>(HUGETLBFS_MAGIC):
- case static_cast<int>(RAMFS_MAGIC):
+ case HUGETLBFS_MAGIC:
+ case RAMFS_MAGIC:
case TMPFS_MAGIC:
*type = FILE_SYSTEM_MEMORY;
break;
--- a/base/system/sys_info_posix.cc
+++ b/base/system/sys_info_posix.cc
@@ -100,10 +100,10 @@
if (HANDLE_EINTR(statfs(path.value().c_str(), &stats)) != 0)
return false;
switch (stats.f_type) {
case TMPFS_MAGIC:
- case static_cast<int>(HUGETLBFS_MAGIC):
- case static_cast<int>(RAMFS_MAGIC):
+ case HUGETLBFS_MAGIC:
+ case RAMFS_MAGIC:
return true;
}
return false;

View file

@ -1,11 +0,0 @@
--- a/media/ffmpeg/ffmpeg_common.cc
+++ b/media/ffmpeg/ffmpeg_common.cc
@@ -1025,7 +1025,7 @@
static const base::NoDestructor<std::string> kAllowedAudioCodecs([]() {
// This should match the configured lists in //third_party/ffmpeg.
std::string allowed_decoders(
- "vorbis,libopus,flac,pcm_u8,pcm_s16le,pcm_s24le,pcm_s32le,pcm_f32le,"
+ "vorbis,opus,libopus,flac,pcm_u8,pcm_s16le,pcm_s24le,pcm_s32le,pcm_f32le,"
"mp3,pcm_s16be,pcm_s24be,pcm_mulaw,pcm_alaw");
#if BUILDFLAG(USE_PROPRIETARY_CODECS)
allowed_decoders += ",aac";

View file

@ -1,17 +0,0 @@
fstatat64 is macroed to fstatat in sys/stat.h in musl, but that fstatat is then
used in the _syscall4 macro mapping to __NR_$name, and __NR_fstatat is not
defined anywhere here, as it wants the 64 name.
so, just let it keep the name with an undef of the stat.h macro; then the macro
expansion below evaluates correctly.
--- a/third_party/lss/linux_syscall_support.h
+++ b/third_party/lss/linux_syscall_support.h
@@ -4947,7 +4947,8 @@
# endif
#endif
#if defined(__NR_fstatat64)
+ #undef fstatat64
LSS_INLINE _syscall4(int, fstatat64, int, d,
const char *, p,
struct kernel_stat64 *, b, int, f)
#endif
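
The breakage described at the top of this patch is a plain preprocessor effect and can be reproduced outside of linux_syscall_support.h. The snippet below is a minimal stand-alone sketch, not the real lss macros: NR_NAME/NR_NAME_ are invented stand-ins for the plumbing through which the name eventually reaches __NR_##name. Because a macro argument is fully expanded before it is substituted into a nested macro, musl's #define turns the intended __NR_fstatat64 into __NR_fstatat; the #undef restores the expected token.

#include <stdio.h>

#define fstatat64 fstatat             /* what musl's sys/stat.h effectively does */

/* hypothetical two-level macro standing in for the lss plumbing */
#define NR_NAME_(name) "__NR_" #name
#define NR_NAME(name)  NR_NAME_(name)

int main(void) {
    printf("%s\n", NR_NAME(fstatat64));   /* prints __NR_fstatat   (broken)      */
#undef fstatat64                          /* the patch's fix                     */
    printf("%s\n", NR_NAME(fstatat64));   /* prints __NR_fstatat64 (as intended) */
    return 0;
}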

View file

@ -1,4 +1,3 @@
aports is a git tree so these git commands get the wrong directory
--- a/tools/gdb/gdbinit
+++ b/tools/gdb/gdbinit
@@ -50,17 +50,7 @@

View file

@ -1,11 +0,0 @@
--- a/services/device/public/cpp/generic_sensor/sensor_reading.h
+++ b/services/device/public/cpp/generic_sensor/sensor_reading.h
@@ -5,6 +5,8 @@
#ifndef SERVICES_DEVICE_PUBLIC_CPP_GENERIC_SENSOR_SENSOR_READING_H_
#define SERVICES_DEVICE_PUBLIC_CPP_GENERIC_SENSOR_SENSOR_READING_H_
+#include <cstddef>
+#include <cstdint>
#include <type_traits>
namespace device {

View file

@ -1,29 +0,0 @@
diff --git a/third_party/blink/renderer/build/scripts/gperf.py.orig b/third_party/blink/renderer/build/scripts/gperf.py
index 42630d3..d909aee 100644
--- a/third_party/blink/renderer/build/scripts/gperf.py.orig
+++ b/third_party/blink/renderer/build/scripts/gperf.py
@@ -28,24 +28,6 @@ def generate_gperf(gperf_path, gperf_input, gperf_args):
stdout=subprocess.PIPE,
universal_newlines=True)
gperf_output = gperf.communicate(gperf_input)[0]
- # Massage gperf output to be more palatable for modern compilers.
- # TODO(thakis): Upstream these to gperf so we don't need massaging.
- # `register` is deprecated in C++11 and removed in C++17, so remove
- # it from gperf's output.
- # https://savannah.gnu.org/bugs/index.php?53028
- gperf_output = re.sub(r'\bregister ', '', gperf_output)
- # -Wimplicit-fallthrough needs an explicit fallthrough statement,
- # so replace gperf's /*FALLTHROUGH*/ comment with the statement.
- # https://savannah.gnu.org/bugs/index.php?53029
- gperf_output = gperf_output.replace('/*FALLTHROUGH*/',
- ' [[fallthrough]];')
- # -Wpointer-to-int-cast warns about casting pointers to smaller ints
- # Replace {(int)(long)&(foo), bar} with
- # {static_cast<int>(reinterpret_cast<uintptr_t>(&(foo)), bar}
- gperf_output = re.sub(
- r'\(int\)\(long\)(.*?),',
- r'static_cast<int>(reinterpret_cast<uintptr_t>(\1)),',
- gperf_output)
script = 'third_party/blink/renderer/build/scripts/gperf.py'
return '// Generated by %s\n' % script + gperf_output
except OSError:

View file

@ -1,11 +1,11 @@
--- a/electron/default_app/default_app.ts
+++ b/electron/default_app/default_app.ts
@@ -61,7 +61,7 @@
@@ -60,7 +60,7 @@
};
if (process.platform === 'linux') {
- options.icon = url.fileURLToPath(new URL('icon.png', import.meta.url));
+ options.icon = 'file:///usr/share/icons/hicolor/1024x1024/apps/electron.png';
- options.icon = path.join(__dirname, 'icon.png');
+ options.icon = '/usr/share/icons/hicolor/1024x1024/apps/electron.png';
}
mainWindow = new BrowserWindow(options);

View file

@ -0,0 +1,39 @@
Patch-Source: https://github.com/archlinux/svntogit-packages/blob/bf2401407df5bcc938382eb03748fbef41e41c89/trunk/unbundle-jsoncpp-avoid-CFI-faults-with-is_cfi-true.patch
From ed8d931e35f81d8566835a579caf7d61368f85b7 Mon Sep 17 00:00:00 2001
From: Evangelos Foutras <evangelos@foutrelis.com>
Date: Tue, 27 Sep 2022 22:20:41 +0000
Subject: [PATCH] unbundle/jsoncpp: avoid CFI faults with is_cfi=true
Ensure jsoncpp symbols have public visibility and are thus excluded from
CFI checks and whole-program optimization. This is achieved by defining
JSON_DLL_BUILD which in turn causes json/config.h to define JSON_API as
__attribute__((visibility("default"))). The latter macro is used to tag
jsoncpp classes and namespace functions throughout jsoncpp's headers.
BUG=1365218
Change-Id: I56277737b7d9ecaeb5e17c8d21a2e55f3d5d5bc9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3919652
Reviewed-by: Thomas Anderson <thomasanderson@chromium.org>
Commit-Queue: Thomas Anderson <thomasanderson@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1052077}
---
build/linux/unbundle/jsoncpp.gn | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/build/linux/unbundle/jsoncpp.gn b/build/linux/unbundle/jsoncpp.gn
index 544f9d13c9..e84a0ef27a 100644
--- a/build/linux/unbundle/jsoncpp.gn
+++ b/build/linux/unbundle/jsoncpp.gn
@@ -3,6 +3,11 @@ import("//build/shim_headers.gni")
pkg_config("jsoncpp_config") {
packages = [ "jsoncpp" ]
+
+ # Defining JSON_DLL_BUILD applies public visibility to jsoncpp classes
+ # thus deactivating CFI checks for them. This avoids CFI violations in
+ # virtual calls to system jsoncpp library (https://crbug.com/1365218).
+ defines = [ "JSON_DLL_BUILD" ]
}
shim_headers("jsoncpp_shim") {
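
As context for the commit message above, the visibility mechanism it describes can be sketched as follows. This is a simplified, assumed rendering of jsoncpp's json/config.h logic (the real header has extra branches for MSVC and shared-library builds); it only illustrates how defining JSON_DLL_BUILD gives the tagged classes default visibility, which is what exempts them from Chromium's CFI checks.

#if defined(JSON_DLL_BUILD)
#define JSON_API __attribute__((visibility("default")))   // public visibility
#else
#define JSON_API                    // hidden under -fvisibility=hidden builds
#endif

// A class tagged this way keeps default visibility, so virtual calls into the
// system library are treated as external and skipped by CFI instrumentation.
class JSON_API Value {
 public:
  virtual ~Value() = default;
};

int main() {
  Value v;
  (void)v;
  return 0;
}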

View file

@ -0,0 +1,18 @@
--- a/base/allocator/partition_allocator/tagging.cc
+++ b/base/allocator/partition_allocator/tagging.cc
@@ -19,15 +19,6 @@
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
-#if BUILDFLAG(IS_LINUX)
-#include <linux/version.h>
-
-// Linux headers already provide these since v5.10.
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
-#define HAS_PR_MTE_MACROS
-#endif
-#endif
-
#ifndef HAS_PR_MTE_MACROS
#define PR_MTE_TCF_SHIFT 1
#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)

View file

@ -1,11 +0,0 @@
--- ./v8/src/base/cpu.cc.orig
+++ ./v8/src/base/cpu.cc
@@ -14,7 +14,7 @@
#if V8_OS_LINUX
#include <linux/auxvec.h> // AT_HWCAP
#endif
-#if V8_GLIBC_PREREQ(2, 16) || V8_OS_ANDROID
+#if 1
#include <sys/auxv.h> // getauxval()
#endif
#if V8_OS_QNX

View file

@ -1,10 +1,7 @@
musl uses different syscalls from glibc for some functions, so the sandbox has
to account for that
--
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc ./sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
index ff5a1c0..da56b9b 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
+++ ./sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_parameters_restrictions.cc
@@ -139,21 +139,11 @@ namespace sandbox {
// present (as in newer versions of posix_spawn).
ResultExpr RestrictCloneToThreadsAndEPERMFork() {
@ -44,8 +41,8 @@ index ff5a1c0..da56b9b 100644
diff --git a/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc ./sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
index d9d1882..0567557 100644
--- a/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
+++ ./sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
@@ -438,6 +438,7 @@
+++ b/sandbox/linux/seccomp-bpf-helpers/syscall_sets.cc
@@ -392,6 +392,7 @@ bool SyscallSets::IsAllowedProcessStartOrDeath(int sysno) {
#if defined(__i386__)
case __NR_waitpid:
#endif
@ -53,7 +50,7 @@ index d9d1882..0567557 100644
return true;
case __NR_clone: // Should be parameter-restricted.
case __NR_setns: // Privileged.
@@ -450,7 +451,6 @@
@@ -404,7 +405,6 @@ bool SyscallSets::IsAllowedProcessStartOrDeath(int sysno) {
#if defined(__i386__) || defined(__x86_64__) || defined(__mips__)
case __NR_set_thread_area:
#endif
@ -61,16 +58,16 @@ index d9d1882..0567557 100644
case __NR_unshare:
#if !defined(__mips__) && !defined(__aarch64__)
case __NR_vfork:
@@ -549,6 +549,8 @@
@@ -514,6 +514,8 @@ bool SyscallSets::IsAllowedAddressSpaceAccess(int sysno) {
case __NR_mlock:
case __NR_munlock:
case __NR_munmap:
case __NR_mseal:
+ case __NR_mremap:
+ case __NR_membarrier:
return true;
case __NR_madvise:
case __NR_mincore:
@@ -566,7 +568,6 @@
@@ -531,7 +533,6 @@ bool SyscallSets::IsAllowedAddressSpaceAccess(int sysno) {
case __NR_modify_ldt:
#endif
case __NR_mprotect:
@ -90,35 +87,21 @@ index 2b78a0c..b6fedb5 100644
#if defined(__x86_64__)
#include "sandbox/linux/system_headers/x86_64_linux_syscalls.h"
diff --git a/services/service_manager/sandbox/linux/bpf_renderer_policy_linux.cc ./services/service_manager/sandbox/linux/bpf_renderer_policy_linux.cc
index a85c0ea..715aa1e 100644
--- a/sandbox/policy/linux/bpf_renderer_policy_linux.cc
+++ b/sandbox/policy/linux/bpf_renderer_policy_linux.cc
@@ -94,6 +94,9 @@
case __NR_pwrite64:
case __NR_sched_get_priority_max:
case __NR_sched_get_priority_min:
+ case __NR_sched_getparam:
+ case __NR_sched_getscheduler:
+ case __NR_sched_setscheduler:
case __NR_sysinfo:
case __NR_times:
case __NR_uname:
--- a/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc
+++ b/sandbox/linux/seccomp-bpf-helpers/baseline_policy.cc
@@ -225,10 +225,15 @@
if (sysno == __NR_getpriority || sysno ==__NR_setpriority)
return RestrictGetSetpriority(current_pid);
+ // XXX: hacks for musl sandbox, calls needed?
+ if (sysno == __NR_sched_getparam || sysno == __NR_sched_getscheduler ||
+ sysno == __NR_sched_setscheduler) {
+ return Allow();
+ }
+
// The scheduling syscalls are used in threading libraries and also heavily in
// abseil. See for example https://crbug.com/1370394.
- if (sysno == __NR_sched_getaffinity || sysno == __NR_sched_getparam ||
- sysno == __NR_sched_getscheduler || sysno == __NR_sched_setscheduler) {
+ if (sysno == __NR_sched_getaffinity) {
return RestrictSchedTarget(current_pid, sysno);
}
@@ -102,11 +102,11 @@
#if defined(__arm__) || defined(__aarch64__)
case __NR_getcpu:
#endif
- return Allow();
- case __NR_sched_getaffinity:
case __NR_sched_getparam:
case __NR_sched_getscheduler:
case __NR_sched_setscheduler:
+ return Allow();
+ case __NR_sched_getaffinity:
return RestrictSchedTarget(GetPolicyPid(), sysno);
case __NR_prlimit64:
// See crbug.com/662450 and setrlimit comment above.

View file

@ -1,7 +1,3 @@
the sandbox caching of thread id's only works with glibc
see: https://gitlab.alpinelinux.org/alpine/aports/-/merge_requests/32356
see: https://gitlab.alpinelinux.org/alpine/aports/-/issues/13579
--
--- a/sandbox/linux/services/namespace_sandbox.cc
+++ b/sandbox/linux/services/namespace_sandbox.cc
@@ -209,6 +209,70 @@

View file

@ -1,6 +1,5 @@
use monotonic clock for pthread_cond_timedwait with musl too, since it supports
it
--
Use monotonic clock for pthread_cond_timedwait with musl too.
--- a/v8/src/base/platform/condition-variable.cc
+++ b/v8/src/base/platform/condition-variable.cc
@@ -16,7 +16,7 @@
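
The mechanism the patch switches to is plain POSIX rather than anything glibc-specific: bind the condition variable to CLOCK_MONOTONIC at init time, and pthread_cond_timedwait() then interprets its absolute deadline on that clock. A minimal stand-alone sketch of the pattern (illustrative only, not taken from the v8 source):

#include <pthread.h>
#include <time.h>

// Wait on `cond` for `ms` milliseconds using the monotonic clock.
// Assumes `cond` was initialised with an attr set to CLOCK_MONOTONIC.
static int timed_wait_monotonic(pthread_cond_t* cond, pthread_mutex_t* mu, long ms) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  ts.tv_sec  += ms / 1000;
  ts.tv_nsec += (ms % 1000) * 1000000L;
  if (ts.tv_nsec >= 1000000000L) { ts.tv_sec++; ts.tv_nsec -= 1000000000L; }
  return pthread_cond_timedwait(cond, mu, &ts);  // ETIMEDOUT on expiry
}

int main(void) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);
  pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);  // supported by musl and glibc
  pthread_cond_t cond;
  pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
  pthread_cond_init(&cond, &attr);
  pthread_mutex_lock(&mu);
  timed_wait_monotonic(&cond, &mu, 10);   // nothing signals: returns ETIMEDOUT
  pthread_mutex_unlock(&mu);
  return 0;
}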

View file

@ -1,8 +1,57 @@
musl does not have execinfo.h, and hence no implementation of
. backtrace()
. backtrace_symbols()
for discussion about this, see https://www.openwall.com/lists/musl/2021/07/16/1
--
--- a/base/debug/stack_trace_posix.cc
+++ b/base/debug/stack_trace_posix.cc
@@ -27,7 +27,7 @@
#if !defined(USE_SYMBOLIZE)
#include <cxxabi.h>
#endif
-#if !defined(__UCLIBC__) && !defined(_AIX)
+#if defined(__GLIBC__) && !defined(_AIX)
#include <execinfo.h>
#endif
@@ -89,7 +89,7 @@
// Note: code in this function is NOT async-signal safe (std::string uses
// malloc internally).
-#if !defined(__UCLIBC__) && !defined(_AIX)
+#if defined(__GLIBC__) && !defined(_AIX)
std::string::size_type search_from = 0;
while (search_from < text->size()) {
// Look for the start of a mangled symbol, from search_from.
@@ -136,7 +136,7 @@
virtual ~BacktraceOutputHandler() = default;
};
-#if !defined(__UCLIBC__) && !defined(_AIX)
+#if defined(__GLIBC__) && !defined(_AIX)
void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
// This should be more than enough to store a 64-bit number in hex:
// 16 hex digits + 1 for null-terminator.
@@ -839,7 +839,7 @@
// If we do not have unwind tables, then try tracing using frame pointers.
return base::debug::TraceStackFramePointers(const_cast<const void**>(trace),
count, 0);
-#elif !defined(__UCLIBC__) && !defined(_AIX)
+#elif defined(__GLIBC__) && !defined(_AIX)
// Though the backtrace API man page does not list any possible negative
// return values, we take no chance.
return base::saturated_cast<size_t>(backtrace(trace, count));
@@ -852,13 +852,13 @@
// NOTE: This code MUST be async-signal safe (it's used by in-process
// stack dumping signal handler). NO malloc or stdio is allowed here.
-#if !defined(__UCLIBC__) && !defined(_AIX)
+#if defined(__GLIBC__) && !defined(_AIX)
PrintBacktraceOutputHandler handler;
ProcessBacktrace(trace_, count_, prefix_string, &handler);
#endif
}
-#if !defined(__UCLIBC__) && !defined(_AIX)
+#if defined(__GLIBC__) && !defined(_AIX)
void StackTrace::OutputToStreamWithPrefix(std::ostream* os,
const char* prefix_string) const {
StreamBacktraceOutputHandler handler(os);
--- a/v8/src/codegen/external-reference-table.cc
+++ b/v8/src/codegen/external-reference-table.cc
@@ -11,7 +11,9 @@
@ -37,32 +86,22 @@ for discussion about this, see https://www.openwall.com/lists/musl/2021/07/16/1
#define HAVE_FCNTL_H 1
--- a/base/debug/stack_trace.cc
+++ b/base/debug/stack_trace.cc
@@ -311,7 +311,7 @@
@@ -251,7 +253,9 @@
}
std::string StackTrace::ToStringWithPrefix(cstring_view prefix_string) const {
void StackTrace::OutputToStream(std::ostream* os) const {
+#if defined(__GLIBC__) && !defined(_AIX)
OutputToStreamWithPrefix(os, nullptr);
+#endif
}
std::string StackTrace::ToString() const {
@@ -281,7 +281,7 @@
}
std::string StackTrace::ToStringWithPrefix(const char* prefix_string) const {
std::stringstream stream;
-#if !defined(__UCLIBC__) && !defined(_AIX)
+#if defined(__GLIBC__) && !defined(_AIX)
OutputToStreamWithPrefix(&stream, prefix_string);
#endif
return stream.str();
@@ -335,7 +335,7 @@
}
std::ostream& operator<<(std::ostream& os, const StackTrace& s) {
-#if !defined(__UCLIBC__) && !defined(_AIX)
+#if defined(__GLIBC__) && !defined(_AIX)
s.OutputToStream(&os);
#else
os << "StackTrace::OutputToStream not implemented.";
--- a/base/debug/stack_trace_unittest.cc
+++ b/base/debug/stack_trace_unittest.cc
@@ -33,7 +33,7 @@
typedef testing::Test StackTraceTest;
#endif
-#if !defined(__UCLIBC__) && !defined(_AIX)
+#if !defined(__UCLIBC__) && !defined(_AIX) && defined(__GLIBC__)
// StackTrace::OutputToStream() is not implemented under uclibc, nor AIX.
// See https://crbug.com/706728
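
For reference, the execinfo.h interface that the guards above fence off looks like this on glibc; the snippet is a stand-alone illustration of the API musl does not ship, not code taken from Chromium:

#if defined(__GLIBC__)
#include <execinfo.h>   /* backtrace(), backtrace_symbols(): glibc only */
#include <stdio.h>
#include <stdlib.h>

static void dump_stack(void) {
  void* frames[64];
  int n = backtrace(frames, 64);               /* capture return addresses      */
  char** syms = backtrace_symbols(frames, n);  /* malloc'd array of strings     */
  for (int i = 0; i < n; ++i)
    fprintf(stderr, "%s\n", syms ? syms[i] : "?");
  free(syms);
}

int main(void) { dump_stack(); return 0; }
#else
int main(void) { return 0; }   /* musl: no execinfo.h, nothing to demonstrate */
#endif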

View file

@ -0,0 +1,19 @@
--- a/chrome/browser/metrics/chrome_browser_main_extra_parts_metrics.cc
+++ b/chrome/browser/metrics/chrome_browser_main_extra_parts_metrics.cc
@@ -61,7 +61,6 @@
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
-#include <gnu/libc-version.h>
#include "base/linux_util.h"
#include "base/strings/string_split.h"
@@ -324,7 +323,7 @@
void RecordLinuxGlibcVersion() {
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
+#if defined(__GLIBC__) || BUILDFLAG(IS_CHROMEOS_LACROS)
base::Version version(gnu_get_libc_version());
UMALinuxGlibcVersion glibc_version_result = UMA_LINUX_GLIBC_NOT_PARSEABLE;

View file

@ -1,6 +1,3 @@
musl does not implement mallinfo()/mallinfo2()
(or rather, malloc-ng, musl's allocator, doesn't)
--
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -185,7 +185,6 @@
@ -60,8 +57,8 @@ musl does not implement mallinfo()/mallinfo2()
// TODO(fuchsia): Not currently exposed. https://crbug.com/735087.
return 0;
#endif
--- ./third_party/tflite/src/tensorflow/lite/profiling/memory_info.cc.orig
+++ ./third_party/tflite/src/tensorflow/lite/profiling/memory_info.cc
--- a/third_party/tflite/src/tensorflow/lite/profiling/memory_info.cc
+++ b/third_party/tflite/src/tensorflow/lite/profiling/memory_info.cc
@@ -35,7 +35,7 @@
MemoryUsage GetMemoryUsage() {
@ -71,8 +68,8 @@ musl does not implement mallinfo()/mallinfo2()
rusage res;
if (getrusage(RUSAGE_SELF, &res) == 0) {
result.max_rss_kb = res.ru_maxrss;
--- ./third_party/swiftshader/third_party/llvm-subzero/lib/Support/Unix/Process.inc
+++ ./third_party/swiftshader/third_party/llvm-subzero/lib/Support/Unix/Process.inc.orig
--- a/third_party/swiftshader/third_party/llvm-subzero/lib/Support/Unix/Process.inc
+++ b/third_party/swiftshader/third_party/llvm-subzero/lib/Support/Unix/Process.inc
@@ -86,11 +86,11 @@
}
@ -88,8 +85,8 @@ musl does not implement mallinfo()/mallinfo2()
mi = ::mallinfo();
return mi.uordblks;
--- ./third_party/swiftshader/third_party/llvm-10.0/configs/linux/include/llvm/Config/config.h.orig 2019-09-30 13:03:42.556880537 -0400
+++ ./third_party/swiftshader/third_party/llvm-10.0/configs/linux/include/llvm/Config/config.h 2019-09-30 13:07:27.989821227 -0400
--- a/third_party/swiftshader/third_party/llvm-10.0/configs/linux/include/llvm/Config/config.h
+++ b/third_party/swiftshader/third_party/llvm-10.0/configs/linux/include/llvm/Config/config.h
@@ -122,7 +122,9 @@
/* #undef HAVE_MALLCTL */
@ -100,25 +97,14 @@ musl does not implement mallinfo()/mallinfo2()
/* Define to 1 if you have the <malloc.h> header file. */
#define HAVE_MALLOC_H 1
--- a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.cc
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.cc
@@ -660,7 +660,7 @@
--- a/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc
+++ b/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc
@@ -717,7 +717,7 @@
#endif // !PA_BUILDFLAG(IS_APPLE) && !PA_BUILDFLAG(IS_ANDROID)
#endif // !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
-#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if 0
SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
partition_alloc::SimplePartitionStatsDumper allocator_dumper;
base::SimplePartitionStatsDumper allocator_dumper;
Allocator()->DumpStats("malloc", true, &allocator_dumper);
--- a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc_unittest.cc
@@ -29,7 +29,7 @@
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Platforms on which we override weak libc symbols.
-#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
+#if (PA_BUILDFLAG(IS_LINUX) && defined(__GLIBC__)) || PA_BUILDFLAG(IS_CHROMEOS)
PA_NOINLINE void FreeForTest(void* data) {
free(data);

View file

@ -1,5 +1,3 @@
similar to dns-resolver.patch, musl doesn't have res_ninit and so on
--
--- a/net/dns/public/scoped_res_state.cc
+++ b/net/dns/public/scoped_res_state.cc
@@ -13,7 +13,7 @@
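
res_ninit() and friends are the reentrant resolver entry points that glibc provides and musl omits; a stand-alone sketch of that API (illustrative only, not the ScopedResState code being patched):

#if defined(__GLIBC__)
#include <netinet/in.h>
#include <resolv.h>        /* res_ninit()/res_nclose(): absent on musl */
#include <string.h>

int main(void) {
  struct __res_state state;
  memset(&state, 0, sizeof(state));
  if (res_ninit(&state) != 0)   /* parse /etc/resolv.conf into `state` */
    return 1;
  /* ... inspect state.nscount, state.options, ... */
  res_nclose(&state);           /* release resources held by `state` */
  return 0;
}
#else
int main(void) { return 0; }    /* musl offers only the res_init()/res_query() style */
#endif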

View file

@ -1,14 +0,0 @@
this optimisation of CLONE_SETTLS is not valid when used like this, and future musl
clone(3) will EINVAL on this use
--
--- a/sandbox/linux/services/credentials.cc
+++ b/sandbox/linux/services/credentials.cc
@@ -89,7 +89,7 @@
int clone_flags = CLONE_FS | LINUX_SIGCHLD;
void* tls = nullptr;
-#if (defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_ARM_FAMILY)) && \
+#if 0 && (defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_ARM_FAMILY)) && \
!defined(MEMORY_SANITIZER)
// Use CLONE_VM | CLONE_VFORK as an optimization to avoid copying page tables.
// Since clone writes to the new child's TLS before returning, we must set a

View file

@ -0,0 +1,12 @@
--- a/base/files/file.h
+++ b/base/files/file.h
@@ -19,7 +19,8 @@
#include "build/build_config.h"
#if BUILDFLAG(IS_BSD) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_NACL) || \
- BUILDFLAG(IS_FUCHSIA) || (BUILDFLAG(IS_ANDROID) && __ANDROID_API__ < 21)
+ BUILDFLAG(IS_FUCHSIA) || (BUILDFLAG(IS_ANDROID) && __ANDROID_API__ < 21) || \
+ (defined(OS_LINUX) && !defined(__GLIBC__))
struct stat;
namespace base {
typedef struct stat stat_wrapper_t;

View file

@ -0,0 +1,11 @@
--- a/chrome/browser/ui/autofill/autofill_popup_controller_impl.h
+++ b/chrome/browser/ui/autofill/autofill_popup_controller_impl.h
@@ -178,7 +178,7 @@
class AutofillPopupViewPtr {
public:
AutofillPopupViewPtr() = default;
- AutofillPopupViewPtr(nullptr_t) : ptr_(nullptr) {}
+ AutofillPopupViewPtr(std::nullptr_t) : ptr_(nullptr) {}
AutofillPopupViewPtr(AutofillPopupView* ptr) : ptr_(ptr) {}
explicit operator bool() const { return ptr_; }

View file

@ -1,25 +0,0 @@
Hard-disable memory tagging on ARM64 - it does exist there but musl is
missing some required interface headers for it, and it's not clear how
to make the partalloc support code for it work.
--- ./base/allocator/partition_allocator/partition_alloc.gni.orig
+++ ./base/allocator/partition_allocator/partition_alloc.gni
@@ -30,7 +30,7 @@
}
has_memory_tagging =
- current_cpu == "arm64" && is_clang && !is_asan && (is_linux || is_android)
+ false
declare_args() {
# Causes all the allocations to be routed via allocator_shim.cc. Usually,
--- ./base/allocator/partition_allocator/src/partition_alloc/aarch64_support.h.orig
+++ ./base/allocator/partition_allocator/src/partition_alloc/aarch64_support.h
@@ -10,7 +10,7 @@
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_buildflags.h"
-#if PA_BUILDFLAG(IS_ANDROID) || PA_BUILDFLAG(IS_LINUX)
+#if PA_BUILDFLAG(IS_ANDROID) || (PA_BUILDFLAG(IS_LINUX) && defined(__GLIBC__))
#define HAS_HW_CAPS
#endif

View file

@ -0,0 +1,15 @@
--- a/base/allocator/partition_allocator/partition_root.cc
+++ b/base/allocator/partition_allocator/partition_root.cc
@@ -248,9 +248,9 @@
// However, no perfect solution really exists to make threads + fork()
// cooperate, but deadlocks are real (and fork() is used in DEATH_TEST()s),
// and other malloc() implementations use the same techniques.
- int err =
- pthread_atfork(BeforeForkInParent, AfterForkInParent, AfterForkInChild);
- PA_CHECK(err == 0);
+ //int err =
+ // pthread_atfork(BeforeForkInParent, AfterForkInParent, AfterForkInChild);
+ //PA_CHECK(err == 0);
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
}

View file

@ -1,23 +0,0 @@
Patch-Source: https://webrtc-review.googlesource.com/c/src/+/380500
---
--- a/third_party/webrtc/modules/video_capture/linux/pipewire_session.cc
+++ b/third_party/webrtc/modules/video_capture/linux/pipewire_session.cc
@@ -87,7 +87,7 @@
.param = OnNodeParam,
};
- pw_node_add_listener(proxy_, &node_listener_, &node_events, this);
+ pw_node_add_listener(reinterpret_cast<pw_node*>(proxy_), &node_listener_, &node_events, this);
}
// static
@@ -119,7 +119,7 @@
uint32_t id = info->params[i].id;
if (id == SPA_PARAM_EnumFormat &&
info->params[i].flags & SPA_PARAM_INFO_READ) {
- pw_node_enum_params(that->proxy_, 0, id, 0, UINT32_MAX, nullptr);
+ pw_node_enum_params(reinterpret_cast<pw_node*>(that->proxy_), 0, id, 0, UINT32_MAX, nullptr);
break;
}
}

View file

@ -1,33 +0,0 @@
the pvalloc/valloc symbols are obsolete and not implemented in musl
--
--- a/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_unittest.cc
+++ b/base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim_unittest.cc
@@ -410,7 +410,7 @@
ASSERT_GE(aligned_allocs_intercepted_by_alignment[128], 1u);
ASSERT_GE(aligned_allocs_intercepted_by_size[53], 1u);
-#if PA_BUILDFLAG(IS_POSIX) && !PA_BUILDFLAG(IS_ANDROID)
+#if (PA_BUILDFLAG(IS_POSIX) && defined(__GLIBC__)) && !PA_BUILDFLAG(IS_ANDROID)
void* pvalloc_ptr = pvalloc(67);
ASSERT_NE(nullptr, pvalloc_ptr);
ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(pvalloc_ptr) % kPageSize);
@@ -449,7 +449,7 @@
free(memalign_ptr);
ASSERT_GE(frees_intercepted_by_addr[Hash(memalign_ptr)], 1u);
-#if PA_BUILDFLAG(IS_POSIX) && !PA_BUILDFLAG(IS_ANDROID)
+#if (PA_BUILDFLAG(IS_POSIX) && defined(__GLIBC__)) && !PA_BUILDFLAG(IS_ANDROID)
free(pvalloc_ptr);
ASSERT_GE(frees_intercepted_by_addr[Hash(pvalloc_ptr)], 1u);
#endif // PA_BUILDFLAG(IS_POSIX) && !PA_BUILDFLAG(IS_ANDROID)
--- a/base/process/memory_unittest.cc
+++ b/base/process/memory_unittest.cc
@@ -359,7 +359,7 @@
#endif // BUILDFLAG(IS_WIN)
#endif // !BUILDFLAG(IS_MAC)
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if (BUILDFLAG(IS_LINUX) && defined(__GLIBC__)) || BUILDFLAG(IS_CHROMEOS)
TEST_F(OutOfMemoryDeathTest, Valloc) {
ASSERT_OOM_DEATH({

View file

@ -0,0 +1,11 @@
--- a/tools/grit/grit/util.py
+++ b/tools/grit/grit/util.py
@@ -209,7 +209,7 @@
mode = 'rb'
encoding = None
else:
- mode = 'rU'
+ mode = 'r'
with io.open(filename, mode, encoding=encoding) as f:
return f.read()

View file

@ -0,0 +1,11 @@
--- a/net/third_party/quiche/src/quiche/quic/core/quic_one_block_arena.h
+++ b/net/third_party/quiche/src/quiche/quic/core/quic_one_block_arena.h
@@ -69,7 +69,7 @@
// QuicConnections currently use around 1KB of polymorphic types which would
// ordinarily be on the heap. Instead, store them inline in an arena.
-using QuicConnectionArena = QuicOneBlockArena<1280>;
+using QuicConnectionArena = QuicOneBlockArena<1504>;
} // namespace quic

View file

@ -0,0 +1,22 @@
for some reason this breaks and, after a few cycles, the fd returned after close()
is still in the lock array,
so just don't enforce or wrap anything.
--- a/base/files/scoped_file_linux.cc
+++ b/base/files/scoped_file_linux.cc
@@ -77,15 +77,3 @@
}
} // namespace base
-
-extern "C" {
-
-int __close(int);
-
-__attribute__((visibility("default"), noinline)) int close(int fd) {
- if (base::IsFDOwned(fd) && g_is_ownership_enforced)
- CrashOnFdOwnershipViolation();
- return __close(fd);
-}
-
-} // extern "C"

View file

@ -0,0 +1,53 @@
--- a/third_party/electron_node/BUILD.gn
+++ b/third_party/electron_node/BUILD.gn
@@ -42,6 +42,18 @@
node_module_version = ""
}
+if (is_linux) {
+ import("//build/config/linux/pkg_config.gni")
+
+ pkg_config("cares") {
+ packages = [ "libcares" ]
+ }
+
+ pkg_config("nghttp2") {
+ packages = [ "libnghttp2" ]
+ }
+}
+
assert(!node_use_dtrace, "node_use_dtrace not supported in GN")
assert(!node_use_etw, "node_use_etw not supported in GN")
@@ -182,11 +194,9 @@
component("node_lib") {
deps = [
":node_js2c",
- "deps/cares",
"deps/histogram",
"deps/googletest:gtest",
"deps/llhttp",
- "deps/nghttp2",
"deps/uvwasi",
"//third_party/zlib",
"//third_party/brotli:dec",
@@ -202,6 +212,19 @@
public_configs = [ ":node_lib_config" ]
include_dirs = [ "src" ]
libs = []
+ if (is_linux) {
+ configs += [
+ ":cares",
+ ":nghttp2",
+ ]
+ libs += [ "http_parser" ]
+ } else {
+ deps += [
+ "deps/cares",
+ "deps/http_parser",
+ "deps/nghttp2",
+ ]
+ }
frameworks = []
cflags_cc = [
"-Wno-deprecated-declarations",

View file

@ -1,5 +1,4 @@
random glibc macro, not provided by musl.
https://www.gnu.org/software/libc/manual/html_node/Interrupted-Primitives.html
random glibc macro
--- a/sandbox/linux/suid/process_util.h
+++ b/sandbox/linux/suid/process_util.h
@@ -11,6 +11,14 @@
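
The glibc manual page linked above documents TEMP_FAILURE_RETRY, so that is presumably the "random glibc macro" this patch supplies (an assumption, since the added hunk body is not shown in this diff). glibc defines the macro roughly as below, using a GNU statement-expression, and a musl build needs an equivalent fallback because musl's unistd.h does not provide one:

#include <errno.h>
#include <unistd.h>

/* Fallback matching glibc's definition (GNU statement-expression extension). */
#ifndef TEMP_FAILURE_RETRY
#define TEMP_FAILURE_RETRY(expression)            \
  (__extension__({                                \
    long int _result;                             \
    do {                                          \
      _result = (long int)(expression);           \
    } while (_result == -1L && errno == EINTR);   \
    _result;                                      \
  }))
#endif

int main(void) {
  char buf[1];
  /* Retry transparently if the call is interrupted by a signal. */
  long n = TEMP_FAILURE_RETRY(read(-1, buf, sizeof(buf)));  /* fails with EBADF */
  (void)n;
  return 0;
}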

View file

@ -0,0 +1,113 @@
--- a/chrome/browser/process_singleton_posix.cc
+++ b/chrome/browser/process_singleton_posix.cc
@@ -607,7 +607,7 @@
// |reader| is for sending back ACK message.
void HandleMessage(const std::string& current_dir,
const std::vector<std::string>& argv,
- const std::vector<const uint8_t> additional_data,
+ const std::vector<uint8_t> additional_data,
SocketReader* reader);
private:
@@ -664,7 +664,7 @@
void ProcessSingleton::LinuxWatcher::HandleMessage(
const std::string& current_dir,
const std::vector<std::string>& argv,
- const std::vector<const uint8_t> additional_data,
+ const std::vector<uint8_t> additional_data,
SocketReader* reader) {
DCHECK(ui_task_runner_->BelongsToCurrentThread());
DCHECK(reader);
@@ -754,7 +754,7 @@
base::StringToSizeT(tokens[0], &num_args);
std::vector<std::string> command_line(tokens.begin() + 1, tokens.begin() + 1 + num_args);
- std::vector<const uint8_t> additional_data;
+ std::vector<uint8_t> additional_data;
if (tokens.size() >= 3 + num_args) {
size_t additional_data_size;
base::StringToSizeT(tokens[1 + num_args], &additional_data_size);
@@ -763,7 +763,7 @@
std::string(1, kTokenDelimiter));
const uint8_t* additional_data_bits =
reinterpret_cast<const uint8_t*>(remaining_args.c_str());
- additional_data = std::vector<const uint8_t>(
+ additional_data = std::vector<uint8_t>(
additional_data_bits, additional_data_bits + additional_data_size);
}
--- a/chrome/browser/process_singleton.h
+++ b/chrome/browser/process_singleton.h
@@ -102,7 +102,7 @@
using NotificationCallback =
base::RepeatingCallback<bool(const base::CommandLine& command_line,
const base::FilePath& current_directory,
- const std::vector<const uint8_t> additional_data)>;
+ const std::vector<uint8_t> additional_data)>;
#if BUILDFLAG(IS_WIN)
ProcessSingleton(const std::string& program_name,
--- a/chrome/browser/process_singleton_win.cc
+++ b/chrome/browser/process_singleton_win.cc
@@ -81,7 +81,7 @@
bool ParseCommandLine(const COPYDATASTRUCT* cds,
base::CommandLine* parsed_command_line,
base::FilePath* current_directory,
- std::vector<const uint8_t>* parsed_additional_data) {
+ std::vector<uint8_t>* parsed_additional_data) {
// We should have enough room for the shortest command (min_message_size)
// and also be a multiple of wchar_t bytes. The shortest command
// possible is L"START\0\0" (empty command line, current directory,
@@ -163,7 +163,7 @@
msg.substr(fourth_null + 1, fifth_null - fourth_null);
const uint8_t* additional_data_bytes =
reinterpret_cast<const uint8_t*>(additional_data.c_str());
- *parsed_additional_data = std::vector<const uint8_t>(additional_data_bytes,
+ *parsed_additional_data = std::vector<uint8_t>(additional_data_bytes,
additional_data_bytes + additional_data_length);
return true;
@@ -187,7 +187,7 @@
base::CommandLine parsed_command_line(base::CommandLine::NO_PROGRAM);
base::FilePath current_directory;
- std::vector<const uint8_t> additional_data;
+ std::vector<uint8_t> additional_data;
if (!ParseCommandLine(cds, &parsed_command_line, &current_directory, &additional_data)) {
*result = TRUE;
return true;
--- a/electron/shell/browser/api/electron_api_app.cc
+++ b/electron/shell/browser/api/electron_api_app.cc
@@ -519,10 +519,10 @@
const base::RepeatingCallback<
void(const base::CommandLine& command_line,
const base::FilePath& current_directory,
- const std::vector<const uint8_t> additional_data)>& callback,
+ const std::vector<uint8_t> additional_data)>& callback,
const base::CommandLine& cmd,
const base::FilePath& cwd,
- const std::vector<const uint8_t> additional_data) {
+ const std::vector<uint8_t> additional_data) {
// Make sure the callback is called after app gets ready.
if (Browser::Get()->is_ready()) {
callback.Run(cmd, cwd, std::move(additional_data));
@@ -1082,7 +1082,7 @@
void App::OnSecondInstance(const base::CommandLine& cmd,
const base::FilePath& cwd,
- const std::vector<const uint8_t> additional_data) {
+ const std::vector<uint8_t> additional_data) {
v8::Isolate* isolate = JavascriptEnvironment::GetIsolate();
v8::Locker locker(isolate);
v8::HandleScope handle_scope(isolate);
--- a/electron/shell/browser/api/electron_api_app.h
+++ b/electron/shell/browser/api/electron_api_app.h
@@ -195,7 +195,7 @@
std::string GetLocaleCountryCode();
void OnSecondInstance(const base::CommandLine& cmd,
const base::FilePath& cwd,
- const std::vector<const uint8_t> additional_data);
+ const std::vector<uint8_t> additional_data);
bool HasSingleInstanceLock() const;
bool RequestSingleInstanceLock(gin::Arguments* args);
void ReleaseSingleInstanceLock();

View file

@ -0,0 +1,20 @@
--- a/third_party/blink/renderer/platform/wtf/stack_util.cc
+++ b/third_party/blink/renderer/platform/wtf/stack_util.cc
@@ -29,7 +29,7 @@
// FIXME: On Mac OSX and Linux, this method cannot estimate stack size
// correctly for the main thread.
-#elif defined(__GLIBC__) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FREEBSD) || \
+#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FREEBSD) || \
BUILDFLAG(IS_FUCHSIA)
// pthread_getattr_np() can fail if the thread is not invoked by
// pthread_create() (e.g., the main thread of blink_unittests).
@@ -97,7 +97,7 @@
}
void* GetStackStart() {
-#if defined(__GLIBC__) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FREEBSD) || \
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FREEBSD) || \
BUILDFLAG(IS_FUCHSIA)
pthread_attr_t attr;
int error;

View file

@ -1,11 +0,0 @@
--- ./buildtools/third_party/libc++/__config_site.orig
+++ ./buildtools/third_party/libc++/__config_site
@@ -18,7 +18,7 @@
/* #undef _LIBCPP_ABI_FORCE_MICROSOFT */
/* #undef _LIBCPP_HAS_NO_THREADS */
/* #undef _LIBCPP_HAS_NO_MONOTONIC_CLOCK */
-/* #undef _LIBCPP_HAS_MUSL_LIBC */
+#define _LIBCPP_HAS_MUSL_LIBC 1
/* #undef _LIBCPP_HAS_THREAD_API_PTHREAD */
/* #undef _LIBCPP_HAS_THREAD_API_EXTERNAL */
/* #undef _LIBCPP_HAS_THREAD_API_WIN32 */

View file

@ -1,171 +0,0 @@
# Contributor: lauren n. liberda <lauren@selfisekai.rocks>
maintainer="lauren n. liberda <lauren@selfisekai.rocks>"
pkgname=element-desktop
pkgver=1.11.100
pkgrel=1
pkgdesc="Secure and independent communication, connected via Matrix"
url="https://element.io/"
arch="aarch64 x86_64" # same as electron
license="GPL-3.0-only"
_electronver=35
depends="
electron~$_electronver
font-inconsolata
font-inter
font-nunito
font-opensans
font-twemoji
"
makedepends="
cargo
electron-dev~$_electronver
electron-tasje
jq
libsecret-dev
nodejs
npm
python3
py3-setuptools
sqlcipher-dev
swc
yarn
"
source="
https://github.com/vector-im/element-desktop/archive/refs/tags/v$pkgver/element-desktop-$pkgver.tar.gz
https://github.com/vector-im/element-web/archive/refs/tags/v$pkgver/element-web-$pkgver.tar.gz
add-alpine-targets.patch
use-system-headers.patch
tasje-one-hak.patch
tasje-no-fuses.patch
no-source-maps.patch.web
use-system-fonts.patch.web
element-desktop
"
options="net !check" # broken
# secfixes:
# 1.11.30-r0:
# - CVE-2023-30609
# 1.11.26-r0:
# - CVE-2023-28103
# - CVE-2023-28427
# 1.11.7-r0:
# - CVE-2022-39249
# - CVE-2022-39250
# - CVE-2022-39251
# - CVE-2022-39236
# 1.11.4-r0:
# - CVE-2022-36059
# - CVE-2022-36060
# used by buildscripts (at least web's webpack)
export VERSION=$pkgver
export CARGO_PROFILE_RELEASE_OPT_LEVEL=2
export CARGO_PROFILE_RELEASE_STRIP="symbols"
export NODE_OPTIONS="--openssl-legacy-provider"
prepare() {
default_prepare
msg "Applying more patches"
for x in $source; do
case "$x" in
*.patch.web)
msg "$x"
patch -p1 -i "$srcdir"/$x -d "$srcdir"/element-web-$pkgver
;;
esac
done
rm -rf res/fonts
(
cd "$srcdir"/element-web-$pkgver
msg "Fetch element-web dependencies"
yarn install --frozen-lockfile --ignore-scripts --ignore-engines
jq '.show_labs_settings = true' < config.sample.json > config.json
)
ln -s "$srcdir"/element-web-$pkgver/webapp webapp
msg "Fetch element-desktop dependencies"
yarn install --frozen-lockfile --ignore-scripts
}
build() {
(
cd "$srcdir"/element-web-$pkgver
msg "Build element-web"
NODE_ENV=production yarn build
)
msg "Build element-desktop"
yarn asar-webapp
# add "optional" native dependencies
# hak stands for hack
yarn run hak --target "$(uname -m)-alpine-linux-musl"
yarn build:ts
yarn build:res
# we need it as js to be of any use for tasje.
# fails with `yarn tsc`. https://github.com/electron-userland/electron-builder/issues/7961
swc compile electron-builder.ts --out-file electron-builder.mjs
yarn install --frozen-lockfile --ignore-scripts --production
npm rebuild keytar-forked --nodedir=/usr/include/electron/node_headers --build-from-source
find node_modules/keytar-forked/build/ -type f \
\! -path node_modules/keytar-forked/build/Release/keytar.node \
-delete
# stripping in build because it gets into asar
strip node_modules/keytar-forked/build/Release/keytar.node
tasje -c electron-builder.mjs pack
}
check() {
(
cd "$srcdir"/element-web-$pkgver
yarn test
)
}
package() {
local resources="dist/resources"
install -Dm644 $resources/app.asar "$pkgdir"/usr/lib/element-desktop/app.asar
install -Dm644 webapp.asar "$pkgdir"/usr/lib/element-desktop/webapp.asar
cp -r $resources/app.asar.unpacked "$pkgdir"/usr/lib/element-desktop/app.asar.unpacked
install -Dm644 $resources/img/element.png "$pkgdir"/usr/lib/element-desktop/img/element.png
install -Dm755 "$srcdir"/$pkgname "$pkgdir"/usr/bin/$pkgname
install -Dm644 dist/$pkgname.desktop "$pkgdir"/usr/share/applications/$pkgname.desktop
while read -r size; do
install -Dm644 dist/icons/$size.png "$pkgdir"/usr/share/icons/hicolor/$size/apps/$pkgname.png
done < dist/icons/size-list
}
sha512sums="
a8ac5180df3b204fb1947ecaa4786c9c5fdd53ffe705f4dfe97a0b051fa371fd9102c41a898e8783cf25f732674da2d987ef315f2facc7b18b20ef214ea0f81d element-desktop-1.11.100.tar.gz
e3e50fd867f9b7388eb22778ccb97d6504a480ab4e2aa9eba10fd0f4d0324a1525c2afe6c80181567d8e554f03086a9e0994a6221cfcb0ca29700754c8f31cac element-web-1.11.100.tar.gz
4747893ed3e43d3074e9afe1cdd668a6be0de073d439205fe8c38c5e0f4091cc76e3cd15d98818bea5139add29501d8d07e83c58e9da230a4ce5bb538d388f80 add-alpine-targets.patch
755b17f7b828eb6920c06a6950ad4e14c32c99d22e9c05fcef7a081b5d2034adb03db3958aa5209c99fb7201f4d888c2383fc9864c5e743dd33f8b5c4925acd7 use-system-headers.patch
92e69817fdc71f60c5c7dcbd3c7b13428cc18141cf5f27720326390f6817bec85fb1c60f8016b3a8fa275f601b16f646cda12b5e379a349368eef2f801b4de7a tasje-one-hak.patch
a1399662bdbe5e7256d31f7bdc05070e7009d70113699856b025a1d5ab1d9b8bf1020072a08255d4eddab0874044131dec296f98a91ba5c12ca61948b1d18dbe tasje-no-fuses.patch
ec635fde026f7fce8e8cc57960b5b9dcec4418416d4867ed47711422d48f068bb58a3c9ceb7715efc9c177beca3788da6b0babc9b689ea8c0724a0395f2b85f8 no-source-maps.patch.web
aaf46476bac403aa5204aa265fcf0654fad4c149fd74d0ec4273c051a5549943384cae3cdd62c5b78fdedfed55c11ecceb898b886e44165cbe7e30953a095cf9 use-system-fonts.patch.web
afc588311dc3b566a754e3e7fe6b37b99a06d47b8bbce0ed9acca8ef308fdab0bd1d41b406199e5cbdd86bdce695ff847cd8668857a235cbdc292ad8b899c063 element-desktop
"

View file

@ -1,52 +0,0 @@
--- a/scripts/hak/target.ts
+++ b/scripts/hak/target.ts
@@ -29,8 +29,10 @@
| "i686-unknown-linux-gnu"
| "x86_64-unknown-linux-musl"
| "x86_64-unknown-linux-gnu"
+ | "x86_64-alpine-linux-musl"
| "aarch64-unknown-linux-musl"
| "aarch64-unknown-linux-gnu"
+ | "aarch64-alpine-linux-musl"
| "powerpc64le-unknown-linux-musl"
| "powerpc64le-unknown-linux-gnu";
@@ -112,6 +114,13 @@
libC: MUSL,
};
+const x8664AlpineLinuxMusl: LinuxTarget = {
+ id: "x86_64-alpine-linux-musl",
+ platform: "linux",
+ arch: "x64",
+ libC: MUSL,
+};
+
const i686UnknownLinuxGnu: LinuxTarget = {
id: "i686-unknown-linux-gnu",
platform: "linux",
@@ -140,6 +149,13 @@
libC: MUSL,
};
+const aarch64AlpineLinuxMusl: LinuxTarget = {
+ id: "aarch64-alpine-linux-musl",
+ platform: "linux",
+ arch: "arm64",
+ libC: MUSL,
+};
+
const powerpc64leUnknownLinuxGnu: LinuxTarget = {
id: "powerpc64le-unknown-linux-gnu",
platform: "linux",
@@ -167,8 +183,10 @@
"i686-unknown-linux-gnu": i686UnknownLinuxGnu,
"x86_64-unknown-linux-musl": x8664UnknownLinuxMusl,
"x86_64-unknown-linux-gnu": x8664UnknownLinuxGnu,
+ "x86_64-alpine-linux-musl": x8664AlpineLinuxMusl,
"aarch64-unknown-linux-musl": aarch64UnknownLinuxMusl,
"aarch64-unknown-linux-gnu": aarch64UnknownLinuxGnu,
+ "aarch64-alpine-linux-musl": aarch64AlpineLinuxMusl,
"powerpc64le-unknown-linux-musl": powerpc64leUnknownLinuxMusl,
"powerpc64le-unknown-linux-gnu": powerpc64leUnknownLinuxGnu,
};

View file

@ -1,3 +0,0 @@
#!/bin/sh
exec electron /usr/lib/element-desktop/app.asar "$@"

View file

@ -1,18 +0,0 @@
--- ./webpack.config.js.orig
+++ ./webpack.config.js
@@ -102,15 +102,6 @@
}
const development = {};
- if (devMode) {
- // Embedded source maps for dev builds, can't use eval-source-map due to CSP
- development["devtool"] = "inline-source-map";
- } else {
- // High quality source maps in separate .map files which include the source. This doesn't bulk up the .js
- // payload file size, which is nice for performance but also necessary to get the bundle to a small enough
- // size that sentry will accept the upload.
- development["devtool"] = "source-map";
- }
// Resolve the directories for the js-sdk for later use. We resolve these early, so we
// don't have to call them over and over. We also resolve to the package.json instead of the src

Some files were not shown because too many files have changed in this diff.