Skip to content

Commit

Permalink
[openmp] Allow x87 fp functions only in OpenMP runtime for x86.
Browse files Browse the repository at this point in the history
This patch allows OpenMP runtime atomic functions operating on x87 high-precision
types to be present only in the OpenMP runtime for x86 architectures.

The functions affected are:

__kmpc_atomic_10
__kmpc_atomic_20
__kmpc_atomic_cmplx10_add
__kmpc_atomic_cmplx10_div
__kmpc_atomic_cmplx10_mul
__kmpc_atomic_cmplx10_sub
__kmpc_atomic_float10_add
__kmpc_atomic_float10_div
__kmpc_atomic_float10_mul
__kmpc_atomic_float10_sub

__kmpc_atomic_float10_add_fp
__kmpc_atomic_float10_div_fp
__kmpc_atomic_float10_mul_fp
__kmpc_atomic_float10_sub_fp
__kmpc_atomic_float10_max
__kmpc_atomic_float10_min

Differential Revision: https://reviews.llvm.org/D117473
  • Loading branch information
malJaj committed Jan 22, 2022
1 parent 7c16647 commit c1988db
Showing 1 changed file with 11 additions and 4 deletions.
15 changes: 11 additions & 4 deletions openmp/runtime/src/kmp_atomic.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1235,10 +1235,12 @@ MIN_MAX_COMPXCHG(float8, max, kmp_real64, 64, <, 8r, 7,
KMP_ARCH_X86) // __kmpc_atomic_float8_max
MIN_MAX_COMPXCHG(float8, min, kmp_real64, 64, >, 8r, 7,
KMP_ARCH_X86) // __kmpc_atomic_float8_min
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
MIN_MAX_CRITICAL(float10, max, long double, <, 10r,
1) // __kmpc_atomic_float10_max
MIN_MAX_CRITICAL(float10, min, long double, >, 10r,
1) // __kmpc_atomic_float10_min
#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_HAVE_QUAD
MIN_MAX_CRITICAL(float16, max, QUAD_LEGACY, <, 16r,
1) // __kmpc_atomic_float16_max
Expand Down Expand Up @@ -1317,6 +1319,7 @@ ATOMIC_CMPX_EQV(fixed8, eqv, kmp_int64, 64, ^~, 8i, 7,
}

/* ------------------------------------------------------------------------- */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// routines for long double type
ATOMIC_CRITICAL(float10, add, long double, +, 10r,
1) // __kmpc_atomic_float10_add
Expand All @@ -1326,6 +1329,7 @@ ATOMIC_CRITICAL(float10, mul, long double, *, 10r,
1) // __kmpc_atomic_float10_mul
ATOMIC_CRITICAL(float10, div, long double, /, 10r,
1) // __kmpc_atomic_float10_div
#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_HAVE_QUAD
// routines for _Quad type
ATOMIC_CRITICAL(float16, add, QUAD_LEGACY, +, 16r,
Expand Down Expand Up @@ -1371,6 +1375,7 @@ ATOMIC_CRITICAL(cmplx8, add, kmp_cmplx64, +, 16c, 1) // __kmpc_atomic_cmplx8_add
ATOMIC_CRITICAL(cmplx8, sub, kmp_cmplx64, -, 16c, 1) // __kmpc_atomic_cmplx8_sub
ATOMIC_CRITICAL(cmplx8, mul, kmp_cmplx64, *, 16c, 1) // __kmpc_atomic_cmplx8_mul
ATOMIC_CRITICAL(cmplx8, div, kmp_cmplx64, /, 16c, 1) // __kmpc_atomic_cmplx8_div
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
ATOMIC_CRITICAL(cmplx10, add, kmp_cmplx80, +, 20c,
1) // __kmpc_atomic_cmplx10_add
ATOMIC_CRITICAL(cmplx10, sub, kmp_cmplx80, -, 20c,
Expand All @@ -1379,6 +1384,7 @@ ATOMIC_CRITICAL(cmplx10, mul, kmp_cmplx80, *, 20c,
1) // __kmpc_atomic_cmplx10_mul
ATOMIC_CRITICAL(cmplx10, div, kmp_cmplx80, /, 20c,
1) // __kmpc_atomic_cmplx10_div
#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_HAVE_QUAD
ATOMIC_CRITICAL(cmplx16, add, CPLX128_LEG, +, 32c,
1) // __kmpc_atomic_cmplx16_add
Expand Down Expand Up @@ -1797,6 +1803,7 @@ ATOMIC_CMPXCHG_MIX(float8, kmp_real64, mul, 64, *, fp, _Quad, 8r, 7,
ATOMIC_CMPXCHG_MIX(float8, kmp_real64, div, 64, /, fp, _Quad, 8r, 7,
KMP_ARCH_X86) // __kmpc_atomic_float8_div_fp

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
ATOMIC_CRITICAL_FP(float10, long double, add, +, fp, _Quad, 10r,
1) // __kmpc_atomic_float10_add_fp
ATOMIC_CRITICAL_FP(float10, long double, sub, -, fp, _Quad, 10r,
Expand All @@ -1806,7 +1813,6 @@ ATOMIC_CRITICAL_FP(float10, long double, mul, *, fp, _Quad, 10r,
ATOMIC_CRITICAL_FP(float10, long double, div, /, fp, _Quad, 10r,
1) // __kmpc_atomic_float10_div_fp

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// Reverse operations
ATOMIC_CMPXCHG_REV_MIX(fixed1, char, sub_rev, 8, -, fp, _Quad, 1i, 0,
KMP_ARCH_X86) // __kmpc_atomic_fixed1_sub_rev_fp
Expand Down Expand Up @@ -3594,7 +3600,7 @@ void __kmpc_atomic_8(ident_t *id_ref, int gtid, void *lhs, void *rhs,
__kmp_release_atomic_lock(&__kmp_atomic_lock_8i, gtid);
}
}

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
void __kmpc_atomic_10(ident_t *id_ref, int gtid, void *lhs, void *rhs,
void (*f)(void *, void *, void *)) {
KMP_DEBUG_ASSERT(__kmp_init_serial);
Expand All @@ -3615,6 +3621,7 @@ void __kmpc_atomic_10(ident_t *id_ref, int gtid, void *lhs, void *rhs,
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock(&__kmp_atomic_lock_10r, gtid);
}
#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64

void __kmpc_atomic_16(ident_t *id_ref, int gtid, void *lhs, void *rhs,
void (*f)(void *, void *, void *)) {
Expand All @@ -3636,7 +3643,7 @@ void __kmpc_atomic_16(ident_t *id_ref, int gtid, void *lhs, void *rhs,
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock(&__kmp_atomic_lock_16c, gtid);
}

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
void __kmpc_atomic_20(ident_t *id_ref, int gtid, void *lhs, void *rhs,
void (*f)(void *, void *, void *)) {
KMP_DEBUG_ASSERT(__kmp_init_serial);
Expand All @@ -3657,7 +3664,7 @@ void __kmpc_atomic_20(ident_t *id_ref, int gtid, void *lhs, void *rhs,
#endif /* KMP_GOMP_COMPAT */
__kmp_release_atomic_lock(&__kmp_atomic_lock_20c, gtid);
}

#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
void __kmpc_atomic_32(ident_t *id_ref, int gtid, void *lhs, void *rhs,
void (*f)(void *, void *, void *)) {
KMP_DEBUG_ASSERT(__kmp_init_serial);
Expand Down

0 comments on commit c1988db

Please sign in to comment.