[macppc, hppa?] Unbreak games/godot

[macppc, hppa?] Unbreak games/godot

Charlene Wendling

> https://marc.info/?l=openbsd-ports&m=156555554325765&w=2

As promised, here is the conversion from __sync_* to __atomic_*
functions, allowing godot to build at least on macppc, and maybe on
hppa, on top of the ports-gcc/sparc64 fixes that Thomas committed a
few hours ago.
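
For context, the mapping used here is the standard correspondence
between GCC's legacy __sync_* builtins and the newer __atomic_*
builtins. A minimal standalone sketch of that correspondence
(illustration only, not part of the patch; on 32-bit archs it needs
-latomic, which is exactly what the Makefile hunk below adds):

  // sync_vs_atomic.cpp -- illustration only
  #include <cstdint>
  #include <cstdio>

  int main() {
      uint64_t counter = 0;

      // Legacy: __sync_add_and_fetch(&counter, 1);
      // __atomic equivalent, as used in the patch:
      __atomic_add_fetch(&counter, 1, __ATOMIC_SEQ_CST);

      // The legacy CAS returns the old value:
      //   __sync_val_compare_and_swap(&counter, 1, 2)
      // The __atomic CAS returns a bool and, on failure, writes the
      // value it observed back into 'expected'; that is harmless in
      // the patch because tmp is re-read on every loop iteration.
      uint64_t expected = 1;
      bool ok = __atomic_compare_exchange_n(&counter, &expected, 2,
          false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

      printf("ok=%d counter=%llu\n", ok, (unsigned long long)counter);
      return 0;
  }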

The patch was originally written for godot-3.1.1 and has been
upstreamed [0]. I backported it to godot-3.0.6; it builds
successfully on amd64 and macppc.

Due to relocation errors, I had to add address relaxation and
long calls.
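
For the record, those are GCC's powerpc-specific -mlongcall option and
the GNU ld --relax option. A rough hand-compiled equivalent (a sketch
only; eg++ is assumed to be ports-gcc, and file.cpp/prog are
placeholder names) would be:

  eg++ -mlongcall -c file.cpp     # call through a register, so targets
                                  # beyond the +/-32MB bl range still resolve
  eg++ -Wl,--relax file.o -o prog -latomic
                                  # let ld fix up out-of-range branches;
                                  # libatomic supplies the 64-bit __atomic_* ops

In the port, the same flags are added via the Makefile hunk below.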

Runtime cannot be tested with my video card (Radeon 9700) on 3.0.6,
because unlike godot-3.1.1 it requires OpenGL(ES) 3 even to open
the editor.

Comments/feedback are welcome,

Charlène.


[0] https://github.com/godotengine/godot/pull/31321


Index: Makefile
===================================================================
RCS file: /cvs/ports/games/godot/Makefile,v
retrieving revision 1.8
diff -u -p -u -p -r1.8 Makefile
--- Makefile 16 Aug 2019 15:38:15 -0000 1.8
+++ Makefile 16 Aug 2019 22:02:03 -0000
@@ -8,7 +8,7 @@ PKGNAME = godot-${V}
 CATEGORIES = games
 HOMEPAGE = https://godotengine.org/
 MAINTAINER = Thomas Frohwein <[hidden email]>
-REVISION = 2
+REVISION = 3
 
 # MIT
 PERMIT_PACKAGE = Yes
@@ -68,6 +68,18 @@ LIB_DEPENDS = archivers/zstd \
  net/enet
 
 NO_TEST = Yes
+
+.if ${MACHINE_ARCH:Mhppa} || ${MACHINE_ARCH:Mpowerpc}
+LDFLAGS +=     -latomic
+WANTLIB +=     atomic
+.endif
+
+# Fix relocation overflows
+.if ${MACHINE_ARCH:Mpowerpc}
+CFLAGS +=      -mlongcall
+CXXFLAGS +=    -mlongcall
+LDFLAGS +=     -Wl,--relax
+.endif
 
 pre-configure:
  ${SUBST_CMD} ${WRKSRC}/drivers/unix/os_unix.cpp
Index: patches/patch-core_safe_refcount_h
===================================================================
RCS file: patches/patch-core_safe_refcount_h
diff -N patches/patch-core_safe_refcount_h
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ patches/patch-core_safe_refcount_h 16 Aug 2019 22:02:03 -0000
@@ -0,0 +1,68 @@
+$OpenBSD$
+
+hppa, ppc: use __atomic functions as 64-bit __sync operators
+are not supported, from:
+https://github.com/godotengine/godot/pull/31321 
+
+Index: core/safe_refcount.h
+--- core/safe_refcount.h.orig
++++ core/safe_refcount.h
+@@ -99,8 +99,8 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(re
+
+ /* Implementation for GCC & Clang */
+
+-// GCC guarantees atomic intrinsics for sizes of 1, 2, 4 and 8 bytes.
+-// Clang states it supports GCC atomic builtins.
++#include <stdbool.h>
++#include <atomic>
+
+ template <class T>
+ static _ALWAYS_INLINE_ T atomic_conditional_increment(register T *pw) {
+@@ -109,7 +109,7 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(
+ T tmp = static_cast<T const volatile &>(*pw);
+ if (tmp == 0)
+ return 0; // if zero, can't add to it anymore
+- if (__sync_val_compare_and_swap(pw, tmp, tmp + 1) == tmp)
++ if (__atomic_compare_exchange_n(pw, &tmp, tmp + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) == true)
+ return tmp + 1;
+ }
+ }
+@@ -117,25 +117,25 @@ static _ALWAYS_INLINE_ T atomic_conditional_increment(
+ template <class T>
+ static _ALWAYS_INLINE_ T atomic_decrement(register T *pw) {
+
+- return __sync_sub_and_fetch(pw, 1);
++ return __atomic_sub_fetch(pw, 1, __ATOMIC_SEQ_CST);
+ }
+
+ template <class T>
+ static _ALWAYS_INLINE_ T atomic_increment(register T *pw) {
+
+- return __sync_add_and_fetch(pw, 1);
++ return __atomic_add_fetch(pw, 1, __ATOMIC_SEQ_CST);
+ }
+
+ template <class T, class V>
+ static _ALWAYS_INLINE_ T atomic_sub(register T *pw, register V val) {
+
+- return __sync_sub_and_fetch(pw, val);
++ return __atomic_sub_fetch(pw, val, __ATOMIC_SEQ_CST);
+ }
+
+ template <class T, class V>
+ static _ALWAYS_INLINE_ T atomic_add(register T *pw, register V val) {
+
+- return __sync_add_and_fetch(pw, val);
++ return __atomic_add_fetch(pw, val, __ATOMIC_SEQ_CST);
+ }
+
+ template <class T, class V>
+@@ -145,7 +145,7 @@ static _ALWAYS_INLINE_ T atomic_exchange_if_greater(re
+ T tmp = static_cast<T const volatile &>(*pw);
+ if (tmp >= val)
+ return tmp; // already greater, or equal
+- if (__sync_val_compare_and_swap(pw, tmp, val) == tmp)
++ if (__atomic_compare_exchange_n(pw, &tmp, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) == true)
+ return val;
+ }
+ }


Re: [macppc, hppa?] Unbreak games/godot

Charlene Wendling
Ping.

On Sat, 17 Aug 2019 00:38:43 +0200
Charlene Wendling wrote:

>
> > https://marc.info/?l=openbsd-ports&m=156555554325765&w=2
>
> As promised, here is the conversion from __sync_* to __atomic_*
> functions, allowing godot to build at least on macppc, and maybe on
> hppa, on top of the ports-gcc/sparc64 fixes that Thomas committed a
> few hours ago.
>
> The patch was originally written for godot-3.1.1 and has been
> upstreamed [0]. I backported it to godot-3.0.6; it builds
> successfully on amd64 and macppc.
>
> Due to relocation errors, I had to add address relaxation and
> long calls.
>
> Runtime cannot be tested with my video card (Radeon 9700) on 3.0.6,
> because unlike godot-3.1.1 it requires OpenGL(ES) 3 even to open
> the editor.
>
> Comments/feedback are welcome,
>
> Charlène.
>
>
> [0] https://github.com/godotengine/godot/pull/31321

