extern kmp_omp_struct_info_t __kmp_omp_debug_struct_info;
int __kmp_debugging = FALSE; // Boolean whether currently debugging OpenMP RTL.
#define offset_and_size_of(structure, field)                                   \
  { offsetof(structure, field), sizeof(((structure *)NULL)->field) }

#define offset_and_size_not_available                                          \
  { -1, -1 }

#define addr_and_size_of(var)                                                  \
  { (kmp_uint64)(&var), sizeof(var) }

#define nthr_buffer_size 1024
static kmp_int32 kmp_omp_nthr_info_buffer[nthr_buffer_size] = {
    nthr_buffer_size * sizeof(kmp_int32)};
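// How to read the macros above: an entry built with
//   offset_and_size_of(kmp_base_info_t, th_team)
// expands to
//   { offsetof(kmp_base_info_t, th_team),
//     sizeof(((kmp_base_info_t *)NULL)->th_team) }
// i.e. each table entry records a field's offset within its structure and the
// field's size, while addr_and_size_of() records a global's address and size.
// offset_and_size_not_available marks entries the debugger cannot rely on.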
// Names of runtime entry points exposed to the debugger (their addresses are
// published through addr_and_size_of() entries in the structure below).
static char func_microtask[] = "__kmp_invoke_microtask";
static char func_fork[] = "__kmpc_fork_call";
static char func_fork_teams[] = "__kmpc_fork_teams";
// Various info about runtime structures: addresses, field offsets, sizes, etc.
kmp_omp_struct_info_t __kmp_omp_debug_struct_info = {

    KMP_OMP_VERSION, // Version of the debugger interface.
    sizeof(kmp_omp_struct_info_t), // Size of this structure (sanity check).

    // OpenMP RTL version info.
    addr_and_size_of(__kmp_version_major),
    addr_and_size_of(__kmp_version_minor),
    addr_and_size_of(__kmp_version_build),
    addr_and_size_of(__kmp_openmp_version),
    {(kmp_uint64)(__kmp_copyright) + KMP_VERSION_MAGIC_LEN,
     0}, // Skip the magic prefix of the copyright string.
    // Various globals.
    addr_and_size_of(__kmp_threads),
    addr_and_size_of(__kmp_root),
    addr_and_size_of(__kmp_threads_capacity),
    addr_and_size_of(__kmp_monitor),
#if !KMP_USE_DYNAMIC_LOCK
    addr_and_size_of(__kmp_user_lock_table),
#endif
    addr_and_size_of(func_microtask),
    addr_and_size_of(func_fork),
    addr_and_size_of(func_fork_teams),
    addr_and_size_of(__kmp_team_counter),
    addr_and_size_of(__kmp_task_counter),
    addr_and_size_of(kmp_omp_nthr_info_buffer),
    sizeof(void *),
    OMP_LOCK_T_SIZE < sizeof(void *),
    bs_last_barrier,
    INITIAL_TASK_DEQUE_SIZE,
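// The OMP_LOCK_T_SIZE < sizeof(void *) entry above is a compile-time boolean
// recording whether an omp_lock_t object is smaller than a pointer
// (presumably so the debugger knows whether user locks are stored inline in
// the omp_lock_t or have to be reached indirectly).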
    // Thread structure information.
    sizeof(kmp_base_info_t),
    offset_and_size_of(kmp_base_info_t, th_info),
    offset_and_size_of(kmp_base_info_t, th_team),
    offset_and_size_of(kmp_base_info_t, th_root),
    offset_and_size_of(kmp_base_info_t, th_serial_team),
    offset_and_size_of(kmp_base_info_t, th_ident),
    offset_and_size_of(kmp_base_info_t, th_spin_here),
    offset_and_size_of(kmp_base_info_t, th_next_waiting),
    offset_and_size_of(kmp_base_info_t, th_task_team),
    offset_and_size_of(kmp_base_info_t, th_current_task),
    offset_and_size_of(kmp_base_info_t, th_task_state),
    offset_and_size_of(kmp_base_info_t, th_bar),
    offset_and_size_of(kmp_bstate_t, b_worker_arrived),
    // Teams information.
    offset_and_size_of(kmp_base_info_t, th_teams_microtask),
    offset_and_size_of(kmp_base_info_t, th_teams_level),
    offset_and_size_of(kmp_teams_size_t, nteams),
    offset_and_size_of(kmp_teams_size_t, nth),
    // kmp_desc structure (for the th_info field above).
    sizeof(kmp_desc_base_t),
    offset_and_size_of(kmp_desc_base_t, ds_tid),
    offset_and_size_of(kmp_desc_base_t, ds_gtid),
// On Windows* OS, ds_thread contains a thread /handle/, which is not a thread
// /id/. The thread /id/ is available in ds_thread_id.
#if KMP_OS_WINDOWS
    offset_and_size_of(kmp_desc_base_t, ds_thread_id),
#else
    offset_and_size_of(kmp_desc_base_t, ds_thread),
#endif
    // Team structure information.
    sizeof(kmp_base_team_t),
    offset_and_size_of(kmp_base_team_t, t_master_tid),
    offset_and_size_of(kmp_base_team_t, t_ident),
    offset_and_size_of(kmp_base_team_t, t_parent),
    offset_and_size_of(kmp_base_team_t, t_nproc),
    offset_and_size_of(kmp_base_team_t, t_threads),
    offset_and_size_of(kmp_base_team_t, t_serialized),
    offset_and_size_of(kmp_base_team_t, t_id),
    offset_and_size_of(kmp_base_team_t, t_pkfn),
    offset_and_size_of(kmp_base_team_t, t_task_team),
    offset_and_size_of(kmp_base_team_t, t_implicit_task_taskdata),
    offset_and_size_of(kmp_base_team_t, t_cancel_request),
    offset_and_size_of(kmp_base_team_t, t_bar),
    offset_and_size_of(kmp_balign_team_t, b_master_arrived),
    offset_and_size_of(kmp_balign_team_t, b_team_arrived),
    // Root structure information.
    sizeof(kmp_base_root_t),
    offset_and_size_of(kmp_base_root_t, r_root_team),
    offset_and_size_of(kmp_base_root_t, r_hot_team),
    offset_and_size_of(kmp_base_root_t, r_uber_thread),
    offset_and_size_not_available,
    // ident structure information.
    sizeof(ident_t),
    offset_and_size_of(ident_t, psource),
    offset_and_size_of(ident_t, flags),
    // Lock structure information.
    sizeof(kmp_base_queuing_lock_t),
    offset_and_size_of(kmp_base_queuing_lock_t, initialized),
    offset_and_size_of(kmp_base_queuing_lock_t, location),
    offset_and_size_of(kmp_base_queuing_lock_t, tail_id),
    offset_and_size_of(kmp_base_queuing_lock_t, head_id),
    offset_and_size_of(kmp_base_queuing_lock_t, next_ticket),
    offset_and_size_of(kmp_base_queuing_lock_t, now_serving),
    offset_and_size_of(kmp_base_queuing_lock_t, owner_id),
    offset_and_size_of(kmp_base_queuing_lock_t, depth_locked),
    offset_and_size_of(kmp_base_queuing_lock_t, flags),
#if !KMP_USE_DYNAMIC_LOCK
    // Lock table.
    sizeof(kmp_lock_table_t),
    offset_and_size_of(kmp_lock_table_t, used),
    offset_and_size_of(kmp_lock_table_t, allocated),
    offset_and_size_of(kmp_lock_table_t, table),
#endif
    // Task team structure information.
    sizeof(kmp_base_task_team_t),
    offset_and_size_of(kmp_base_task_team_t, tt_threads_data),
    offset_and_size_of(kmp_base_task_team_t, tt_found_tasks),
    offset_and_size_of(kmp_base_task_team_t, tt_nproc),
    offset_and_size_of(kmp_base_task_team_t, tt_unfinished_threads),
    offset_and_size_of(kmp_base_task_team_t, tt_active),
    // kmp_taskdata_t structure information.
    sizeof(kmp_taskdata_t),
    offset_and_size_of(kmp_taskdata_t, td_task_id),
    offset_and_size_of(kmp_taskdata_t, td_flags),
    offset_and_size_of(kmp_taskdata_t, td_team),
    offset_and_size_of(kmp_taskdata_t, td_parent),
    offset_and_size_of(kmp_taskdata_t, td_level),
    offset_and_size_of(kmp_taskdata_t, td_ident),
    offset_and_size_of(kmp_taskdata_t, td_allocated_child_tasks),
    offset_and_size_of(kmp_taskdata_t, td_incomplete_child_tasks),

    offset_and_size_of(kmp_taskdata_t, td_taskwait_ident),
    offset_and_size_of(kmp_taskdata_t, td_taskwait_counter),
    offset_and_size_of(kmp_taskdata_t, td_taskwait_thread),

    offset_and_size_of(kmp_taskdata_t, td_taskgroup),
    offset_and_size_of(kmp_taskgroup_t, count),
    offset_and_size_of(kmp_taskgroup_t, cancel_request),

    offset_and_size_of(kmp_taskdata_t, td_depnode),
    offset_and_size_of(kmp_depnode_list_t, node),
    offset_and_size_of(kmp_depnode_list_t, next),
    offset_and_size_of(kmp_base_depnode_t, successors),
    offset_and_size_of(kmp_base_depnode_t, task),
    offset_and_size_of(kmp_base_depnode_t, npredecessors),
    offset_and_size_of(kmp_base_depnode_t, nrefs),
    offset_and_size_of(kmp_task_t, routine),
    // kmp_thread_data_t (per-thread task deque) information.
    sizeof(kmp_thread_data_t),
    offset_and_size_of(kmp_base_thread_data_t, td_deque),
    offset_and_size_of(kmp_base_thread_data_t, td_deque_size),
    offset_and_size_of(kmp_base_thread_data_t, td_deque_head),
    offset_and_size_of(kmp_base_thread_data_t, td_deque_tail),
    offset_and_size_of(kmp_base_thread_data_t, td_deque_ntasks),
    offset_and_size_of(kmp_base_thread_data_t, td_deque_last_stolen),

    // The last field.
    KMP_OMP_VERSION,

}; // __kmp_omp_debug_struct_info
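// Debugger-side usage sketch (hypothetical consumer code, not part of this
// runtime): given the address of a kmp_base_info_t and the {offset, size}
// pair published above for th_team, a debugger would read the field roughly
// as
//   read_target_memory(thread_addr + entry.offset, entry.size, &value);
// where read_target_memory() stands in for whatever target-memory access
// primitive the debugger provides.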
#undef offset_and_size_of
#undef addr_and_size_of

// Convert a 64-bit address stored in the debugger interface structures into a
// pointer. The Intel compiler warns about the narrowing conversion on 32-bit
// targets, so those warnings are suppressed here.
static inline void *__kmp_convert_to_ptr(kmp_uint64 addr) {
#if KMP_COMPILER_ICC
#pragma warning(push)
#pragma warning(disable : 810) // conversion from "unsigned long long" to
                               // "char *" may lose significant bits
#pragma warning(disable : 1195) // conversion from integer to smaller pointer
#endif // KMP_COMPILER_ICC
  return (void *)addr;
#if KMP_COMPILER_ICC
#pragma warning(pop)
#endif // KMP_COMPILER_ICC
} // __kmp_convert_to_ptr

static int kmp_location_match(kmp_str_loc_t *loc, kmp_omp_nthr_item_t *item) {

  int file_match = 0;
  int func_match = 0;
  int line_match = 0;
  char *file = (char *)__kmp_convert_to_ptr(item->file);
  char *func = (char *)__kmp_convert_to_ptr(item->func);
  file_match = __kmp_str_fname_match(&loc->fname, file);
  func_match =
      item->func == 0 // If item->func is NULL, function name is not checked.
      || strcmp(func, "*") == 0 ||
      (loc->func != NULL && strcmp(loc->func, func) == 0);
  line_match =
      item->begin <= loc->line &&
      (item->end <= 0 ||
       loc->line <= item->end); // If item->end <= 0, there is no upper bound.

  return (file_match && func_match && line_match);

} // kmp_location_match
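// Illustration of the matching rules above (hypothetical item values): an
// item with func == 0 (no function-name check), begin == 10 and end == 0 (no
// upper line bound) matches every location in a matching file at line 10 or
// later.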
int __kmp_omp_num_threads(ident_t const *ident) {

  int num_threads = 0;

  kmp_omp_nthr_info_t *info = (kmp_omp_nthr_info_t *)__kmp_convert_to_ptr(
      __kmp_omp_debug_struct_info.nthr_info.addr);
  if (info->num > 0 && info->array != 0) {
    kmp_omp_nthr_item_t *items =
        (kmp_omp_nthr_item_t *)__kmp_convert_to_ptr(info->array);
    kmp_str_loc_t loc = __kmp_str_loc_init(ident->psource, 1);
    int i;
    for (i = 0; i < info->num; ++i) {
      if (kmp_location_match(&loc, &items[i])) {
        num_threads = items[i].num_threads;
      }
    }
    __kmp_str_loc_free(&loc);
  }

  return num_threads;

} // __kmp_omp_num_threads
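// Note that the loop above does not stop at the first hit: if several
// debugger-supplied items match the same source location, the last matching
// item determines the returned number of threads.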