StarPU Internal Handbook
sched_ctx.h
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2011-2023 Université de Bordeaux, CNRS (LaBRI UMR 5800), Inria
 * Copyright (C) 2016 Uppsala University
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#ifndef __SCHED_CONTEXT_H__
#define __SCHED_CONTEXT_H__

#include <starpu.h>
#include <starpu_sched_ctx.h>
#include <starpu_sched_ctx_hypervisor.h>
#include <starpu_scheduler.h>
#include <common/config.h>
#include <common/barrier_counter.h>
#include <common/utils.h>
#include <profiling/profiling.h>
#include <semaphore.h>
#include <core/task.h>
#include "sched_ctx_list.h"

#ifdef STARPU_HAVE_HWLOC
#include <hwloc.h>
#endif

#pragma GCC visibility push(hidden)

#define NO_RESIZE -1
#define REQ_RESIZE 0
#define DO_RESIZE 1

#define STARPU_GLOBAL_SCHED_CTX 0
#define STARPU_NMAXSMS 13

struct _starpu_sched_ctx
{
	/* id of the context */
	unsigned id;

	unsigned do_schedule;

	/* name of the context */
	const char *name;

	/* scheduling policy of the context */
	struct starpu_sched_policy *sched_policy;

	/* data needed by the scheduling policy */
	void *policy_data;

	/* data provided by the application */
	void *user_data;

	/* collection of workers belonging to the context */
	struct starpu_worker_collection *workers;

	/* set for the initial (global) context */
	unsigned is_initial_sched;

	/* counter used to wait for the tasks submitted to the context */
	struct _starpu_barrier_counter tasks_barrier;

	/* counter of the ready tasks of the context */
	struct _starpu_barrier_counter ready_tasks_barrier;

	/* total number of flops of the ready tasks */
	double ready_flops;

	/* current iteration numbers */
	long iterations[2];
	int iteration_level;

	/* ready tasks that couldn't be pushed because the ctx has no workers */
	struct starpu_task_list empty_ctx_tasks;

	/* ready tasks that couldn't be pushed because the window of tasks was already full */
	struct starpu_task_list waiting_tasks;

	/* bounds on the number of CPU and GPU workers, used when resizing the context */
	int min_ncpus;

	int max_ncpus;

	int min_ngpus;

	int max_ngpus;

	/* context that inherits the resources of this one when it is deleted */
	unsigned inheritor;

	/* the application has finished submitting tasks to this context */
	unsigned finished_submit;

	/* priority bounds of the context */
	int min_priority;
	int max_priority;
	int min_priority_is_set;
	int max_priority_is_set;

#ifdef STARPU_HAVE_HWLOC
	/* hwloc set of the workers of the context */
	hwloc_bitmap_t hwloc_workers_set;
#endif

#ifdef STARPU_USE_SC_HYPERVISOR
	/* performance counters used by the hypervisor */
	struct starpu_sched_ctx_performance_counters *perf_counters;
#endif //STARPU_USE_SC_HYPERVISOR

	/* callback (and its argument) called when the context is closed */
	void (*close_callback)(unsigned sched_ctx_id, void* args);
	void *close_args;

	unsigned hierarchy_level;

	int main_master;

	unsigned nesting_sched_ctx;

	/* performance model architecture associated with the context */
	struct starpu_perfmodel_arch perf_arch;

	unsigned parallel_view;

	unsigned awake_workers;

	void (*callback_sched)(unsigned);

	int sub_ctxs[STARPU_NMAXWORKERS];
	int nsub_ctxs;

	/* range of CUDA SMs assigned to the context */
	int nsms;
	int sms_start_idx;
	int sms_end_idx;

	int stream_worker;

	/* lock protecting the context, and owner of the write lock */
	starpu_pthread_rwlock_t rwlock;
	starpu_pthread_t lock_write_owner;
};

LIST_TYPE(_starpu_ctx_change,
	int sched_ctx_id;
	int op;
	int nworkers_to_notify;
	int *workerids_to_notify;
	int nworkers_to_change;
	int *workerids_to_change;
);

/* initialize all the scheduling contexts at StarPU startup */
void _starpu_init_all_sched_ctxs(struct _starpu_machine_config *config);

/* allocate and initialize a new scheduling context */
struct _starpu_sched_ctx* _starpu_create_sched_ctx(struct starpu_sched_policy *policy, int *workerid, int nworkerids, unsigned is_init_sched, const char *sched_name,
						   int min_prio_set, int min_prio,
						   int max_prio_set, int max_prio, unsigned awake_workers, void (*sched_policy_callback)(unsigned), void *user_data,
						   int nsub_ctxs, int *sub_ctxs, int nsms);
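
/* Illustrative sketch (not compiled): one plausible way _starpu_create_sched_ctx()
 * could be invoked to build an initial context. The policy pointer, worker ids,
 * priorities and the helper name below are made up for the example; the actual
 * call sites are in the StarPU core. */
#if 0
static struct _starpu_sched_ctx *example_create_initial_ctx(struct starpu_sched_policy *policy)
{
	int workerids[2] = {0, 1};	/* hypothetical worker ids */
	return _starpu_create_sched_ctx(policy, workerids, 2,
					1 /* is_init_sched */, "init",
					0, 0,	/* min_prio not set */
					0, 0,	/* max_prio not set */
					1 /* awake_workers */,
					NULL /* no policy callback */,
					NULL /* no user data */,
					0, NULL /* no sub-contexts */,
					0 /* no SMs */);
}
#endif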

/* delete all the scheduling contexts */
void _starpu_delete_all_sched_ctxs();

/* wait for all the tasks submitted to the context to be executed */
int _starpu_wait_for_all_tasks_of_sched_ctx(unsigned sched_ctx_id);

/* wait until at most n submitted tasks of the context remain pending */
int _starpu_wait_for_n_submitted_tasks_of_sched_ctx(unsigned sched_ctx_id, unsigned n);

/* counters of submitted tasks of the context */
void _starpu_increment_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);
void _starpu_decrement_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);
int _starpu_get_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);
int _starpu_check_nsubmitted_tasks_of_sched_ctx(unsigned sched_ctx_id);

/* counters of ready tasks (and their flops) of the context */
void _starpu_decrement_nready_tasks_of_sched_ctx(unsigned sched_ctx_id, double ready_flops);
unsigned _starpu_increment_nready_tasks_of_sched_ctx(unsigned sched_ctx_id, double ready_flops, struct starpu_task *task);
int _starpu_wait_for_no_ready_of_sched_ctx(unsigned sched_ctx_id);

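/* Illustrative sketch (not compiled): how the submitted/ready counters are
 * expected to bracket a task's lifetime inside a context. The helper below is
 * hypothetical; only the counter functions come from this header, and the
 * decrement calls are normally issued by the core when the task completes. */
#if 0
static void example_task_counters(unsigned sched_ctx_id, struct starpu_task *task)
{
	/* the task enters the context */
	_starpu_increment_nsubmitted_tasks_of_sched_ctx(sched_ctx_id);

	/* the task becomes ready to run */
	_starpu_increment_nready_tasks_of_sched_ctx(sched_ctx_id, task->flops, task);

	/* ... the task is scheduled and executed ... */

	/* the task leaves the ready state, then terminates, which lets
	 * _starpu_wait_for_all_tasks_of_sched_ctx() unblock */
	_starpu_decrement_nready_tasks_of_sched_ctx(sched_ctx_id, task->flops);
	_starpu_decrement_nsubmitted_tasks_of_sched_ctx(sched_ctx_id);
}
#endif
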
/* fill pus with the workers of the given arch that belong to the context, return their number */
int _starpu_get_workers_of_sched_ctx(unsigned sched_ctx_id, int *pus, enum starpu_worker_archtype arch);

void _starpu_worker_gets_out_of_ctx(unsigned sched_ctx_id, struct _starpu_worker *worker);

unsigned _starpu_worker_belongs_to_a_sched_ctx(int workerid, unsigned sched_ctx_id);

unsigned _starpu_sched_ctx_last_worker_awake(struct _starpu_worker *worker);

/* check whether the workers of the context are able to execute the task */
int _starpu_workers_able_to_execute_task(struct starpu_task *task, struct _starpu_sched_ctx *sched_ctx);

/* return the context currently associated with the calling thread/worker */
unsigned _starpu_sched_ctx_get_current_context() STARPU_ATTRIBUTE_VISIBILITY_DEFAULT;

unsigned _starpu_sched_ctx_allow_hypervisor(unsigned sched_ctx_id);

struct starpu_perfmodel_arch * _starpu_sched_ctx_get_perf_archtype(unsigned sched_ctx);
#ifdef STARPU_USE_SC_HYPERVISOR
/* called after the execution of a task, so that the hypervisor can update its counters */
void _starpu_sched_ctx_post_exec_task_cb(int workerid, struct starpu_task *task, size_t data_size, uint32_t footprint);

#endif //STARPU_USE_SC_HYPERVISOR

void starpu_sched_ctx_add_combined_workers(int *combined_workers_to_add, unsigned n_combined_workers_to_add, unsigned sched_ctx_id);

struct _starpu_sched_ctx *__starpu_sched_ctx_get_sched_ctx_for_worker_and_job(struct _starpu_worker *worker, struct _starpu_job *j);

#define _starpu_sched_ctx_get_sched_ctx_for_worker_and_job(w,j) \
	(_starpu_get_nsched_ctxs() <= 1 ? _starpu_get_sched_ctx_struct(0) : __starpu_sched_ctx_get_sched_ctx_for_worker_and_job((w),(j)))

static inline struct _starpu_sched_ctx *_starpu_get_sched_ctx_struct(unsigned id);

static inline int _starpu_sched_ctx_check_write_locked(unsigned sched_ctx_id)
{
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	return starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self());
}
#define STARPU_SCHED_CTX_CHECK_LOCK(sched_ctx_id) STARPU_ASSERT(_starpu_sched_ctx_check_write_locked((sched_ctx_id)))

static inline void _starpu_sched_ctx_lock_write(unsigned sched_ctx_id)
{
	STARPU_ASSERT(sched_ctx_id <= STARPU_NMAX_SCHED_CTXS);
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	STARPU_HG_DISABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_ASSERT(!starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self()));
	STARPU_HG_ENABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_PTHREAD_RWLOCK_WRLOCK(&sched_ctx->rwlock);
	sched_ctx->lock_write_owner = starpu_pthread_self();
}

static inline void _starpu_sched_ctx_unlock_write(unsigned sched_ctx_id)
{
	STARPU_ASSERT(sched_ctx_id <= STARPU_NMAX_SCHED_CTXS);
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	STARPU_HG_DISABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_ASSERT(starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self()));
	memset(&sched_ctx->lock_write_owner, 0, sizeof(sched_ctx->lock_write_owner));
	STARPU_HG_ENABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_PTHREAD_RWLOCK_UNLOCK(&sched_ctx->rwlock);
}

static inline void _starpu_sched_ctx_lock_read(unsigned sched_ctx_id)
{
	STARPU_ASSERT(sched_ctx_id <= STARPU_NMAX_SCHED_CTXS);
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	STARPU_HG_DISABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_ASSERT(!starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self()));
	STARPU_HG_ENABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_PTHREAD_RWLOCK_RDLOCK(&sched_ctx->rwlock);
}

static inline void _starpu_sched_ctx_unlock_read(unsigned sched_ctx_id)
{
	STARPU_ASSERT(sched_ctx_id <= STARPU_NMAX_SCHED_CTXS);
	struct _starpu_sched_ctx *sched_ctx = _starpu_get_sched_ctx_struct(sched_ctx_id);
	STARPU_HG_DISABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_ASSERT(!starpu_pthread_equal(sched_ctx->lock_write_owner, starpu_pthread_self()));
	STARPU_HG_ENABLE_CHECKING(sched_ctx->lock_write_owner);
	STARPU_PTHREAD_RWLOCK_UNLOCK(&sched_ctx->rwlock);
}
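
/* Illustrative sketch (not compiled): the intended write-lock discipline. The
 * function name and the field update are hypothetical; the lock helpers and
 * the assertion macro are the ones defined above. */
#if 0
static void example_update_ctx(unsigned sched_ctx_id, const char *new_name)
{
	_starpu_sched_ctx_lock_write(sched_ctx_id);

	/* code called with the lock held may check the ownership */
	STARPU_SCHED_CTX_CHECK_LOCK(sched_ctx_id);
	_starpu_get_sched_ctx_struct(sched_ctx_id)->name = new_name;

	_starpu_sched_ctx_unlock_write(sched_ctx_id);
}
#endif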

static inline unsigned _starpu_sched_ctx_worker_is_master_for_child_ctx(unsigned sched_ctx_id, unsigned workerid, struct starpu_task *task)
{
	unsigned child_sched_ctx = starpu_sched_ctx_worker_is_master_for_child_ctx(workerid, sched_ctx_id);
	if(child_sched_ctx != STARPU_NMAX_SCHED_CTXS)
	{
		starpu_sched_ctx_move_task_to_ctx_locked(task, child_sched_ctx, 1);
		starpu_sched_ctx_revert_task_counters_ctx_locked(sched_ctx_id, task->flops);
		return 1;
	}
	return 0;
}

/* apply the context changes that were deferred for the calling worker */
void _starpu_worker_apply_deferred_ctx_changes(void);

#pragma GCC visibility pop

#endif // __SCHED_CONTEXT_H__