ayaports/backports/postgresql15/jit-datalayout-mismatch-on-s390x-and-x86.patch

From: Tom Stellard <tstellar@redhat.com>
From: Jakub Jirutka <jakub@jirutka.cz>
Date: Sat, 18 Dec 2021 23:09:03 +0100
Subject: [PATCH] jit: Workaround datalayout mismatch on s390x and x86

This patch is based on https://src.fedoraproject.org/rpms/postgresql/blob/f35/f/postgresql-datalayout-mismatch-on-s390.patch.

Original description:

> LLVM's s390x target uses a different datalayout for z13 and newer processors.
> If llvmjit_types.bc is compiled to target a processor older than z13, and
> then the JIT runs on a z13 or newer processor, then there will be a mismatch
> in datalayouts between llvmjit_types.bc and the JIT engine. This mismatch
> causes the JIT to fail at runtime.

We encountered an analogous problem even on x86 (legacy 32-bit arch).
However, I didn't want to spend time researching which exact CPU features
are problematic on this dead architecture, so I just disabled the use of
any host-specific CPU features when creating the JIT on x86. And while I
was at it, I also made the s390x workaround conditional on s390x only. -jirutka
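
The following standalone probe is for illustration only and is not part of
the patch; the file name, the "z10" baseline CPU, and the build command are
assumptions. On an s390x host with a z13 or newer CPU, it shows the mismatch
by comparing the datalayout LLVM picks for a pre-z13 CPU against the one it
picks for the host CPU:

    /* dl_probe.c -- hypothetical demo, not applied by this patch.
     * Build (assumed): cc dl_probe.c $(llvm-config --cflags --libs --ldflags) -o dl_probe
     */
    #include <stdio.h>
    #include <string.h>
    #include <llvm-c/Core.h>
    #include <llvm-c/Target.h>
    #include <llvm-c/TargetMachine.h>

    /* Return the datalayout string a target machine for `cpu` would use. */
    static char *
    layout_for(LLVMTargetRef target, const char *triple, const char *cpu)
    {
        LLVMTargetMachineRef tm = LLVMCreateTargetMachine(target, triple, cpu, "",
                                                          LLVMCodeGenLevelNone,
                                                          LLVMRelocDefault,
                                                          LLVMCodeModelDefault);
        LLVMTargetDataRef td = LLVMCreateTargetDataLayout(tm);
        char *layout = LLVMCopyStringRepOfTargetData(td);

        LLVMDisposeTargetData(td);
        LLVMDisposeTargetMachine(tm);
        return layout;
    }

    int
    main(void)
    {
        LLVMTargetRef target;
        char *error = NULL;

        LLVMInitializeAllTargetInfos();
        LLVMInitializeAllTargets();
        LLVMInitializeAllTargetMCs();

        char *triple = LLVMGetDefaultTargetTriple();
        if (LLVMGetTargetFromTriple(triple, &target, &error))
        {
            fprintf(stderr, "cannot look up target: %s\n", error);
            return 1;
        }

        char *host_cpu = LLVMGetHostCPUName();
        char *old_layout = layout_for(target, triple, "z10");  /* pre-z13 baseline */
        char *host_layout = layout_for(target, triple, host_cpu);

        printf("pre-z13 layout: %s\n", old_layout);
        printf("host    layout: %s\n", host_layout);
        if (strcmp(old_layout, host_layout) != 0)
            printf("datalayout mismatch detected\n");

        LLVMDisposeMessage(old_layout);
        LLVMDisposeMessage(host_layout);
        LLVMDisposeMessage(host_cpu);
        LLVMDisposeMessage(triple);
        return 0;
    }

If the two strings differ, the scenario described above arises: an
llvmjit_types.bc built for the baseline CPU cannot be used by a JIT engine
configured with the full host features.
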
diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c
index 98a27f08bf..05b6438ba8 100644
--- a/src/backend/jit/llvm/llvmjit.c
+++ b/src/backend/jit/llvm/llvmjit.c
@@ -776,7 +776,38 @@
 				 errhidecontext(true)));
 }
 
+#if defined(__s390__) || defined(__s390x__)
 /*
+ * For the systemz target, LLVM uses a different datalayout for z13 and newer
+ * CPUs than it does for older CPUs. This can cause a mismatch in datalayouts
+ * in the case where the llvm_types_module is compiled with a pre-z13 CPU
+ * and the JIT is running on z13 or newer.
+ * See computeDataLayout() function in
+ * llvm/lib/Target/SystemZ/SystemZTargetMachine.cpp for information on the
+ * datalayout differences.
+ */
+static bool
+needs_systemz_workaround(void)
+{
+	bool		ret = false;
+	LLVMContextRef llvm_context;
+	LLVMTypeRef vec_type;
+	LLVMTargetDataRef llvm_layoutref;
+	if (strncmp(LLVMGetTargetName(llvm_targetref), "systemz", strlen("systemz")))
+	{
+		return false;
+	}
+
+	llvm_context = LLVMGetModuleContext(llvm_types_module);
+	vec_type = LLVMVectorType(LLVMIntTypeInContext(llvm_context, 32), 4);
+	llvm_layoutref = LLVMCreateTargetData(llvm_layout);
+	ret = (LLVMABIAlignmentOfType(llvm_layoutref, vec_type) == 16);
+	LLVMDisposeTargetData(llvm_layoutref);
+	return ret;
+}
+#endif
+
+/*
  * Per session initialization.
  */
 static void
@@ -785,6 +816,7 @@
 	MemoryContext oldcontext;
 	char	   *error = NULL;
 	char	   *cpu = NULL;
+	char	   *host_features = NULL;
 	char	   *features = NULL;
 	LLVMTargetMachineRef opt0_tm;
 	LLVMTargetMachineRef opt3_tm;
@@ -815,11 +847,22 @@
 	 * latter is needed because some CPU architectures default to enabling
 	 * features not all CPUs have (weird, huh).
 	 */
+#if !defined(__i386__) && !defined(__i386) // XXX: quick workaround for 32-bit x86
 	cpu = LLVMGetHostCPUName();
-	features = LLVMGetHostCPUFeatures();
+	features = host_features = LLVMGetHostCPUFeatures();
 	elog(DEBUG2, "LLVMJIT detected CPU \"%s\", with features \"%s\"",
 		 cpu, features);
+#endif
 
+#if defined(__s390__) || defined(__s390x__)
+	if (needs_systemz_workaround())
+	{
+		const char *no_vector = ",-vector";
+		features = malloc(sizeof(char) * (strlen(host_features) + strlen(no_vector) + 1));
+		sprintf(features, "%s%s", host_features, no_vector);
+	}
+#endif
+
 	opt0_tm =
 		LLVMCreateTargetMachine(llvm_targetref, llvm_triple, cpu, features,
 								LLVMCodeGenLevelNone,
@@ -833,8 +876,13 @@
 	LLVMDisposeMessage(cpu);
 	cpu = NULL;
-	LLVMDisposeMessage(features);
+	if (features != host_features)
+	{
+		free(features);
+	}
 	features = NULL;
+	LLVMDisposeMessage(host_features);
+	host_features = NULL;
 
 	/* force symbols in main binary to be loaded */
 	LLVMLoadLibraryPermanently(NULL);
 
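
A note on the "== 16" test in needs_systemz_workaround(), based on my reading
of computeDataLayout() in SystemZTargetMachine.cpp (the function the patch
comment points to): a z13+ layout carries "-v128:64", which drops the ABI
alignment of a <4 x i32> vector to 8 bytes, while a pre-z13 layout leaves it
at its natural 16 bytes. So an alignment of 16 means llvmjit_types.bc was
built without the vector facility, and the JIT must strip "-vector" from the
host features to match it. A minimal sketch of the same classification (the
helper name is made up):

    /* Returns 1 if the given SystemZ datalayout string was produced
     * without the vector facility (pre-z13): such a layout reports the
     * natural 16-byte ABI alignment for <4 x i32>, whereas a z13+
     * layout contains "-v128:64" and reports 8. */
    #include <llvm-c/Core.h>
    #include <llvm-c/Target.h>

    static int
    layout_lacks_vector_facility(const char *layout_str)
    {
        LLVMTargetDataRef td = LLVMCreateTargetData(layout_str);
        LLVMTypeRef vec = LLVMVectorType(LLVMInt32Type(), 4);
        int ret = (LLVMABIAlignmentOfType(td, vec) == 16);

        LLVMDisposeTargetData(td);
        return ret;
    }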