author     Jakub Jelinek <jakub@redhat.com>  2022-04-26 08:57:17 +0200
committer  Jakub Jelinek <jakub@redhat.com>  2022-04-26 08:57:17 +0200
commit     57a957cb71c004de80b0fd30c8db3cc67576e0ce (patch)
tree       74607f2b52669b8db12005d197bc22f723474e0f /libgomp
parent     c++: generic lambda fn parm pack [PR104624] (diff)
libgomp: Fix up two non-GOMP_USE_ALIGNED_WORK_SHARES related issues [PR105358]
Last fall I changed struct gomp_work_share so that it doesn't have an
__attribute__((aligned (64))) lock member in the middle unless the target has
a non-emulated aligned allocator; otherwise it just makes sure the first and
second halves are 64 bytes apart for cache line reasons, but doesn't make the
struct itself 64-byte aligned, so we can use normal allocators for it.

When the struct isn't 64-byte aligned, the amount of tail padding
significantly decreases, to 0 or 4 bytes or so.  The library uses that tail
padding when the ordered_team_ids array (an array of unsigned ints) and/or
the memory for lastprivate conditional temporaries (the latter wants to
guarantee long long alignment) fit into it.

The problem with this on ia32 darwin9 is that while the struct contains
long long members, long long is just 4-byte aligned as a struct member while
__alignof__ (long long) is 8.  That causes problems in gomp_init_work_share,
where we currently rely on the assumption that if
offsetof (struct gomp_work_share, inline_ordered_team_ids) is long long
aligned, then that tail array will be aligned at runtime and so no extra
memory for dynamic realignment will be needed (which is false when the whole
struct doesn't have long long alignment).  The remaining hunks fix another
problem: we compute INLINE_ORDERED_TEAM_IDS_OFF as the above offsetof
rounded up to a long long boundary, and then subtract it from
sizeof (struct gomp_work_share).  When unlucky, the latter isn't a multiple
of 8 and the former is 4 bigger than it, and as the subtraction is done in
size_t, we end up with (size_t) -4, so the comparison doesn't really work.

The fixes add additional conditions to make it work properly, but all of
them should be evaluated at compile time when optimizing and so shouldn't
slow anything down.

2022-04-26  Jakub Jelinek  <jakub@redhat.com>

	PR libgomp/105358
	* work.c (gomp_init_work_share): Don't mask off the adjustment for
	dynamic long long realignment if struct gomp_work_share has
	smaller alignof than long long.
	* loop.c (GOMP_loop_start): Don't use inline_ordered_team_ids
	if struct gomp_work_share has smaller alignof than long long
	or if sizeof (struct gomp_work_share) is smaller than
	INLINE_ORDERED_TEAM_IDS_OFF.
	* loop_ull.c (GOMP_loop_ull_start): Likewise.
	* sections.c (GOMP_sections2_start): Likewise.
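To make the alignment quirk concrete, here is a minimal, self-contained
sketch; struct ws_like is a hypothetical stand-in, not the real
struct gomp_work_share.  Built with gcc -m32 on an affected target, it
prints __alignof__ (long long) == 8 while the struct itself only guarantees
4-byte alignment, so no member offset can promise 8-byte alignment at
runtime:

/* Hypothetical stand-in for struct gomp_work_share; shows that long long
   can be 4-byte aligned as a struct member even though
   __alignof__ (long long) is 8 (e.g. on ia32).  */
#include <stdio.h>
#include <stddef.h>

struct ws_like
{
  int a;
  long long b;                  /* offset 4 on ia32, not 8 */
  unsigned inline_ids[8];
};

int main (void)
{
  printf ("__alignof__ (long long)      = %zu\n",
          (size_t) __alignof__ (long long));
  printf ("__alignof__ (struct ws_like) = %zu\n",
          (size_t) __alignof__ (struct ws_like));
  printf ("offsetof (ws_like, b)        = %zu\n",
          offsetof (struct ws_like, b));
  /* When the struct's own alignment is smaller than
     __alignof__ (long long), even a member offset that is a multiple
     of 8 gives no runtime guarantee: the object itself may start at
     an address that is only 4 mod 8.  */
  return 0;
}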
Diffstat (limited to 'libgomp')
 libgomp/loop.c     | 7 +++++--
 libgomp/loop_ull.c | 7 +++++--
 libgomp/sections.c | 7 +++++--
 libgomp/work.c     | 4 +++-
 4 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/libgomp/loop.c b/libgomp/loop.c
index 682df39a4ed..be85162bb1e 100644
--- a/libgomp/loop.c
+++ b/libgomp/loop.c
@@ -270,8 +270,11 @@ GOMP_loop_start (long start, long end, long incr, long sched,
 #define INLINE_ORDERED_TEAM_IDS_OFF \
   ((offsetof (struct gomp_work_share, inline_ordered_team_ids) \
    + __alignof__ (long long) - 1) & ~(__alignof__ (long long) - 1))
-      if (size > (sizeof (struct gomp_work_share)
-                  - INLINE_ORDERED_TEAM_IDS_OFF))
+      if (sizeof (struct gomp_work_share)
+          <= INLINE_ORDERED_TEAM_IDS_OFF
+          || __alignof__ (struct gomp_work_share) < __alignof__ (long long)
+          || size > (sizeof (struct gomp_work_share)
+                     - INLINE_ORDERED_TEAM_IDS_OFF))
         *mem
           = (void *) (thr->ts.work_share->ordered_team_ids
                       = gomp_malloc_cleared (size));
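The size_t wrap-around that the new first condition guards against can be
reproduced in isolation.  A minimal sketch with hypothetical byte counts
(244 and 248 are illustrative values, not the real sizeof or offsetof):

#include <stdio.h>
#include <stddef.h>

int main (void)
{
  size_t struct_size = 244;  /* hypothetical sizeof (struct gomp_work_share) */
  size_t off = 248;          /* hypothetical offsetof rounded up to 8 */
  size_t size = 64;          /* requested allocation */

  /* Old test: the subtraction wraps to (size_t) -4, i.e. SIZE_MAX - 3,
     so "size > tail" is false and the too-small inline buffer is used.  */
  printf ("tail = %zu\n", struct_size - off);
  if (size > struct_size - off)
    puts ("old test: heap-allocate (never reached with these values)");

  /* Fixed test: checking struct_size <= off first short-circuits before
     the wrapping subtraction can mislead.  */
  if (struct_size <= off || size > struct_size - off)
    puts ("fixed test: heap-allocate");
  return 0;
}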
diff --git a/libgomp/loop_ull.c b/libgomp/loop_ull.c
index 2aaa34e3bca..602737296d4 100644
--- a/libgomp/loop_ull.c
+++ b/libgomp/loop_ull.c
@@ -269,8 +269,11 @@ GOMP_loop_ull_start (bool up, gomp_ull start, gomp_ull end,
 #define INLINE_ORDERED_TEAM_IDS_OFF \
   ((offsetof (struct gomp_work_share, inline_ordered_team_ids) \
    + __alignof__ (long long) - 1) & ~(__alignof__ (long long) - 1))
-      if (size > (sizeof (struct gomp_work_share)
-                  - INLINE_ORDERED_TEAM_IDS_OFF))
+      if (sizeof (struct gomp_work_share)
+          <= INLINE_ORDERED_TEAM_IDS_OFF
+          || __alignof__ (struct gomp_work_share) < __alignof__ (long long)
+          || size > (sizeof (struct gomp_work_share)
+                     - INLINE_ORDERED_TEAM_IDS_OFF))
         *mem
           = (void *) (thr->ts.work_share->ordered_team_ids
                       = gomp_malloc_cleared (size));
diff --git a/libgomp/sections.c b/libgomp/sections.c
index e9d99e434ac..7751d5aac83 100644
--- a/libgomp/sections.c
+++ b/libgomp/sections.c
@@ -121,8 +121,11 @@ GOMP_sections2_start (unsigned count, uintptr_t *reductions, void **mem)
 #define INLINE_ORDERED_TEAM_IDS_OFF \
   ((offsetof (struct gomp_work_share, inline_ordered_team_ids) \
    + __alignof__ (long long) - 1) & ~(__alignof__ (long long) - 1))
-      if (size > (sizeof (struct gomp_work_share)
-                  - INLINE_ORDERED_TEAM_IDS_OFF))
+      if (sizeof (struct gomp_work_share)
+          <= INLINE_ORDERED_TEAM_IDS_OFF
+          || __alignof__ (struct gomp_work_share) < __alignof__ (long long)
+          || size > (sizeof (struct gomp_work_share)
+                     - INLINE_ORDERED_TEAM_IDS_OFF))
         *mem
           = (void *) (thr->ts.work_share->ordered_team_ids
                       = gomp_malloc_cleared (size));
diff --git a/libgomp/work.c b/libgomp/work.c
index a88409dc78b..c53625afe2c 100644
--- a/libgomp/work.c
+++ b/libgomp/work.c
@@ -113,7 +113,9 @@ gomp_init_work_share (struct gomp_work_share *ws, size_t ordered,
       size_t o = nthreads * sizeof (*ws->ordered_team_ids);
       o += __alignof__ (long long) - 1;
       if ((offsetof (struct gomp_work_share, inline_ordered_team_ids)
-           & (__alignof__ (long long) - 1)) == 0)
+           & (__alignof__ (long long) - 1)) == 0
+          && __alignof__ (struct gomp_work_share)
+             >= __alignof__ (long long))
         o &= ~(__alignof__ (long long) - 1);
       ordered += o - 1;
     }
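For reference, the patched work.c rounding logic, extracted into a hedged
stand-alone sketch; ordered_alloc_slack, member_offset, and struct_align are
hypothetical stand-ins for the real ws->ordered_team_ids bookkeeping,
offsetof (struct gomp_work_share, inline_ordered_team_ids), and
__alignof__ (struct gomp_work_share):

#include <stddef.h>

/* Sketch of the fixed rounding: only drop the slack reserved for dynamic
   realignment when the member offset is a multiple of
   __alignof__ (long long) AND the struct itself is guaranteed at least
   that alignment at runtime (the newly added condition).  */
size_t
ordered_alloc_slack (size_t nthreads, size_t member_offset,
                     size_t struct_align)
{
  size_t o = nthreads * sizeof (unsigned);
  o += __alignof__ (long long) - 1;   /* room to realign dynamically */
  if ((member_offset & (__alignof__ (long long) - 1)) == 0
      && struct_align >= __alignof__ (long long))
    o &= ~(__alignof__ (long long) - 1);
  return o;
}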