/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <ldsodefs.h>


int
pthread_getattr_np (pthread_t thread_id, pthread_attr_t *attr)
{
  struct pthread *thread = (struct pthread *) thread_id;
  struct pthread_attr *iattr = (struct pthread_attr *) attr;
  int ret = 0;

  lll_lock (thread->lock, LLL_PRIVATE);

  /* The thread library is responsible for keeping the values in the
     thread descriptor up-to-date in case the user changes them.  */
  memcpy (&iattr->schedparam, &thread->schedparam,
          sizeof (struct sched_param));
  iattr->schedpolicy = thread->schedpolicy;

  /* Copy the flags word.  */
  iattr->flags = thread->flags;

  /* The thread might be detached by now.  */
  if (IS_DETACHED (thread))
    iattr->flags |= ATTR_FLAG_DETACHSTATE;

  /* This is the guardsize after adjusting it.  */
  iattr->guardsize = thread->reported_guardsize;

  /* The sizes are subject to alignment.  */
  if (__glibc_likely (thread->stackblock != NULL))
    {
      iattr->stacksize = thread->stackblock_size;
      iattr->stackaddr = (char *) thread->stackblock + iattr->stacksize;
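      /* The value stored above is the end (highest address) of the stack
         block; a matching pthread_attr_getstack is expected to subtract
         stacksize again to report the block's base address to the caller.  */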
    }
  else
    {
      /* No stack information available.  This must be for the initial
         thread.  Get the info in some magical way.  */

      /* Stack size limit.  */
      struct rlimit rl;

      /* The safest way to get the top of the stack is to read
         /proc/self/maps and locate the line into which
         __libc_stack_end falls.  */
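      /* A maps line begins with the mapping's address range, for example
         "7ffc0a0e1000-7ffc0a102000 rw-p 00000000 00:00 0  [stack]"; only
         the two hexadecimal addresses are parsed below.  */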
      FILE *fp = fopen ("/proc/self/maps", "rce");
      if (fp == NULL)
        ret = errno;
      /* We need the limit of the stack in any case.  */
      else
        {
          if (getrlimit (RLIMIT_STACK, &rl) != 0)
            ret = errno;
          else
            {
              /* We consider the main process stack to have ended with
                 the page containing __libc_stack_end.  There is stuff below
                 it in the stack too, like the program arguments, environment
                 variables and auxv info, but we ignore those pages when
                 returning size so that the output is consistent when the
                 stack is marked executable due to a loaded DSO requiring
                 it.  */
              void *stack_end = (void *) ((uintptr_t) __libc_stack_end
                                          & -(uintptr_t) GLRO(dl_pagesize));
#if _STACK_GROWS_DOWN
              stack_end += GLRO(dl_pagesize);
#endif
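              /* With 4 KiB pages and __libc_stack_end at, say,
                 0x7ffc0a1014d8, the mask gives 0x7ffc0a101000 and adding one
                 page yields 0x7ffc0a102000, the end of the page holding
                 __libc_stack_end.  */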
              /* We need no locking.  */
              __fsetlocking (fp, FSETLOCKING_BYCALLER);

              /* Until we find an entry (which should always be the case),
                 mark the result as a failure.  */
              ret = ENOENT;

              char *line = NULL;
              size_t linelen = 0;
              uintptr_t last_to = 0;

              while (! feof_unlocked (fp))
                {
                  if (__getdelim (&line, &linelen, '\n', fp) <= 0)
                    break;

                  uintptr_t from;
                  uintptr_t to;
                  if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2)
                    continue;
                  if (from <= (uintptr_t) __libc_stack_end
                      && (uintptr_t) __libc_stack_end < to)
                    {
                      /* Found the entry.  Now we have the info we need.  */
                      iattr->stackaddr = stack_end;
                      iattr->stacksize =
                        rl.rlim_cur - (size_t) (to - (uintptr_t) stack_end);

                      /* Cut it down to align it to page size since otherwise
                         we risk going beyond rlimit when the kernel rounds up
                         the stack extension request.  */
                      iattr->stacksize = (iattr->stacksize
                                          & -(intptr_t) GLRO(dl_pagesize));

                      /* The limit might be too high.  */
                      if ((size_t) iattr->stacksize
                          > (size_t) iattr->stackaddr - last_to)
                        iattr->stacksize = (size_t) iattr->stackaddr - last_to;
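                      /* Illustrative numbers: with an 8 MiB RLIMIT_STACK and
                         8 KiB of argument/environment pages between stack_end
                         and `to', stacksize starts as 8 MiB - 8 KiB; if the
                         previous mapping ends only 4 MiB below stackaddr, it
                         is clamped to 4 MiB here.  */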

                      /* We succeeded; no need to look further.  */
                      ret = 0;
                      break;
                    }
                  last_to = to;
                }

              free (line);
            }

          fclose (fp);
        }
    }

  iattr->flags |= ATTR_FLAG_STACKADDR;

  if (ret == 0)
    {
      size_t size = 16;
      cpu_set_t *cpuset = NULL;

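      /* The size of the affinity mask is not known in advance, so keep
         doubling the buffer for as long as the call reports EINVAL
         (buffer too small), up to the limit checked below.  */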
      do
        {
          size <<= 1;

          void *newp = realloc (cpuset, size);
          if (newp == NULL)
            {
              ret = ENOMEM;
              break;
            }
          cpuset = (cpu_set_t *) newp;
#if 1
#warning not yet pthread_getaffinity_np
          ret = ENOSYS;
#else
          ret = __pthread_getaffinity_np (thread_id, size, cpuset);
#endif
        }
      /* Pick some ridiculous upper limit.  Is 8 million CPUs enough?  */
      while (ret == EINVAL && size < 1024 * 1024);

      if (ret == 0)
        {
          iattr->cpuset = cpuset;
          iattr->cpusetsize = size;
        }
      else
        {
          free (cpuset);
          if (ret == ENOSYS)
            {
              /* There is no such functionality.  */
              ret = 0;
              iattr->cpuset = NULL;
              iattr->cpusetsize = 0;
            }
        }
    }

  lll_unlock (thread->lock, LLL_PRIVATE);

  return ret;
}
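
/* A minimal usage sketch (illustrative, not part of this file): callers
   typically pair pthread_getattr_np with pthread_attr_getstack to query a
   thread's stack bounds, then release the attribute object.  The helper
   name print_stack_bounds is hypothetical; the pthread_* calls are the
   standard interfaces:

     #define _GNU_SOURCE
     #include <pthread.h>
     #include <stdio.h>

     static void
     print_stack_bounds (void)
     {
       pthread_attr_t attr;
       void *stackaddr;
       size_t stacksize;

       // Query the calling thread's attributes, including stack placement.
       if (pthread_getattr_np (pthread_self (), &attr) == 0)
         {
           // Lowest stack address and usable size, as computed above.
           pthread_attr_getstack (&attr, &stackaddr, &stacksize);
           printf ("stack base %p, size %zu\n", stackaddr, stacksize);
           pthread_attr_destroy (&attr);
         }
     }
*/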