This patch enables the use of KMP_AFFINITY=balanced on non-MIC architectures. The restriction is that, on non-MIC architectures, balanced affinity only works on single-package machines. (A usage sketch follows the commit metadata below.)

llvm-svn: 225794
Andrey Churbanov 2015-01-13 14:54:00 +00:00
parent 3984da5608
commit f28f613eda
5 changed files with 19 additions and 39 deletions
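For context, a minimal usage sketch (my addition, not part of the patch): with this change applied, KMP_AFFINITY=balanced is accepted on a non-MIC host as long as the machine has a single package, and each worker thread's placement can be inspected from user code. The sketch assumes Linux/glibc (for sched_getcpu()) and a compiler whose OpenMP runtime is built from this code base, e.g. clang -fopenmp linking libomp.

/*
 * Build:  clang -fopenmp balanced_check.c -o balanced_check
 * Run:    KMP_AFFINITY=balanced,verbose ./balanced_check
 */
#define _GNU_SOURCE
#include <sched.h>   /* sched_getcpu() */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    #pragma omp parallel
    {
        /* Each thread reports the logical CPU it landed on, so the balanced
           spread across the cores of the single package can be checked by eye;
           the verbose modifier makes the runtime print its own view as well. */
        printf("thread %d of %d on cpu %d\n",
               omp_get_thread_num(), omp_get_num_threads(), sched_getcpu());
    }
    return 0;
}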


@@ -679,9 +679,7 @@ enum affinity_type {
affinity_compact,
affinity_scatter,
affinity_explicit,
- #if KMP_MIC
affinity_balanced,
- #endif
affinity_disabled, // not used outside the env var parser
affinity_default
};
@@ -2336,9 +2334,7 @@ typedef struct KMP_ALIGN_CACHE kmp_base_team {
int t_first_place; // first & last place in parent thread's partition.
int t_last_place; // Restore these values to master after par region.
#endif // OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
- #if KMP_MIC
int t_size_changed; // team size was changed?: 0: no, 1: yes, -1: changed via omp_set_num_threads() call
- #endif
// Read/write by workers as well -----------------------------------------------------------------------
#if KMP_ARCH_X86 || KMP_ARCH_X86_64


@@ -3819,9 +3819,8 @@ __kmp_aux_affinity_initialize(void)
}
goto sortAddresses;
- # if KMP_MIC
case affinity_balanced:
- // Balanced works only for the case of a single package and uniform topology
+ // Balanced works only for the case of a single package
if( nPackages > 1 ) {
if( __kmp_affinity_verbose || __kmp_affinity_warnings ) {
KMP_WARNING( AffBalancedNotAvail, "KMP_AFFINITY" );
@@ -3872,7 +3871,6 @@ __kmp_aux_affinity_initialize(void)
break;
}
- # endif
sortAddresses:
//
@@ -4019,10 +4017,7 @@ __kmp_affinity_set_init_mask(int gtid, int isa_root)
if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel)
# endif
{
- if ((__kmp_affinity_type == affinity_none)
- # if KMP_MIC
- || (__kmp_affinity_type == affinity_balanced)
- # endif
+ if ((__kmp_affinity_type == affinity_none) || (__kmp_affinity_type == affinity_balanced)
) {
# if KMP_OS_WINDOWS && KMP_ARCH_X86_64
if (__kmp_num_proc_groups > 1) {
@@ -4397,7 +4392,6 @@ __kmp_aux_get_affinity_mask_proc(int proc, void **mask)
return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}
- # if KMP_MIC
// Dynamic affinity settings - Affinity balanced
void __kmp_balanced_affinity( int tid, int nthreads )
@@ -4626,8 +4620,6 @@ void __kmp_balanced_affinity( int tid, int nthreads )
}
}
- # endif /* KMP_MIC */
#else
// affinity not supported
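The hunks above remove the MIC-only guards in this file: the balanced case in __kmp_aux_affinity_initialize(), the early-exit check in __kmp_affinity_set_init_mask(), and the definition of __kmp_balanced_affinity() itself. The surrounding context, __kmp_aux_get_affinity_mask_proc(), appears to be the internal routine behind the kmp_get_affinity_mask_proc() extension, so the resulting binding can be checked from user code. A hedged sketch, assuming the kmp_* affinity entry points declared in this runtime's omp.h and treating a zero return from kmp_get_affinity() as success:

#include <stdio.h>
#include <omp.h>

int main(void)
{
    #pragma omp parallel
    {
        kmp_affinity_mask_t mask;
        kmp_create_affinity_mask(&mask);
        /* Assumption: 0 means success, as in the documented kmp_* extension API. */
        if (kmp_get_affinity(&mask) == 0) {
            #pragma omp critical
            {
                printf("thread %d bound to procs:", omp_get_thread_num());
                for (int p = 0; p < kmp_get_affinity_max_proc(); ++p) {
                    if (kmp_get_affinity_mask_proc(p, &mask) > 0) {
                        printf(" %d", p);
                    }
                }
                printf("\n");
            }
        }
        kmp_destroy_affinity_mask(&mask);
    }
    return 0;
}

Run with e.g. KMP_AFFINITY=granularity=fine,balanced; with fine granularity each thread should typically report a single processor.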


@@ -1536,12 +1536,12 @@ __kmp_fork_barrier(int gtid, int tid)
kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
if (proc_bind == proc_bind_intel) {
#endif
- #if KMP_MIC
+ #if KMP_AFFINITY_SUPPORTED
// Call dynamic affinity settings
if(__kmp_affinity_type == affinity_balanced && team->t.t_size_changed) {
__kmp_balanced_affinity(tid, team->t.t_nproc);
}
- #endif
+ #endif // KMP_AFFINITY_SUPPORTED
#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
}
else if ((proc_bind != proc_bind_false)
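This hunk widens the fork-barrier hook from KMP_MIC builds to any build with KMP_AFFINITY_SUPPORTED: when the affinity type is balanced and the hot team's size has changed, __kmp_balanced_affinity() recomputes the placement for the new team. A small sketch of exercising that path (my example, not from the patch): per the __kmp_set_num_threads() hunk further down, calling omp_set_num_threads() marks the hot team with t_size_changed = -1, so the next parallel region re-binds the threads. Run it with KMP_AFFINITY=balanced,verbose and compare the two passes.

#define _GNU_SOURCE
#include <sched.h>   /* sched_getcpu() */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    /* Pass 1: default team size, balanced placement computed at the fork barrier. */
    #pragma omp parallel
    {
        printf("pass 1: thread %d on cpu %d\n", omp_get_thread_num(), sched_getcpu());
    }

    /* Shrink the team; per the runtime hunks this flags the hot team
       (t_size_changed = -1), so the next fork barrier re-runs
       __kmp_balanced_affinity() for the smaller team. */
    int half = omp_get_num_procs() / 2;
    omp_set_num_threads(half > 0 ? half : 1);

    #pragma omp parallel
    {
        printf("pass 2: thread %d on cpu %d\n", omp_get_thread_num(), sched_getcpu());
    }
    return 0;
}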


@@ -2268,10 +2268,8 @@ __kmp_set_num_threads( int new_nth, int gtid )
KMP_DEBUG_ASSERT( hot_team->t.t_threads[f] != NULL );
hot_team->t.t_threads[f]->th.th_team_nproc = new_nth;
}
- #if KMP_MIC
// Special flag in case omp_set_num_threads() call
hot_team->t.t_size_changed = -1;
- #endif
}
}
@@ -2837,9 +2835,7 @@ __kmp_initialize_root( kmp_root_t *root )
// TODO???: hot_team->t.t_max_active_levels = __kmp_dflt_max_active_levels;
hot_team->t.t_sched.r_sched_type = r_sched.r_sched_type;
hot_team->t.t_sched.chunk = r_sched.chunk;
- #if KMP_MIC
hot_team->t.t_size_changed = 0;
- #endif
}
@@ -4411,7 +4407,6 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
put that case first. */
if (team->t.t_nproc == new_nproc) { // Check changes in number of threads
KA_TRACE( 20, ("__kmp_allocate_team: reusing hot team\n" ));
- #if KMP_MIC
// This case can mean that omp_set_num_threads() was called and the hot team size
// was already reduced, so we check the special flag
if ( team->t.t_size_changed == -1 ) {
@@ -4419,7 +4414,6 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
} else {
team->t.t_size_changed = 0;
}
- #endif
// TODO???: team->t.t_max_active_levels = new_max_active_levels;
team->t.t_sched = new_icvs->sched;
@@ -4451,9 +4445,7 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
else if( team->t.t_nproc > new_nproc ) {
KA_TRACE( 20, ("__kmp_allocate_team: decreasing hot team thread count to %d\n", new_nproc ));
- #if KMP_MIC
team->t.t_size_changed = 1;
- #endif
if ( __kmp_tasking_mode != tskm_immediate_exec ) {
kmp_task_team_t *task_team = team->t.t_task_team;
if ( ( task_team != NULL ) && TCR_SYNC_4(task_team->tt.tt_active) ) {
@@ -4541,9 +4533,7 @@ __kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc,
KA_TRACE( 20, ("__kmp_allocate_team: increasing hot team thread count to %d\n", new_nproc ));
- #if KMP_MIC
team->t.t_size_changed = 1;
- #endif
#if KMP_NESTED_HOT_TEAMS


@@ -2128,14 +2128,12 @@ __kmp_parse_affinity_env( char const * name, char const * value,
__kmp_nested_proc_bind.bind_types[0] = proc_bind_intel;
# endif
buf = next;
- # if KMP_MIC
} else if (__kmp_match_str("balanced", buf, (const char **)&next)) {
set_type( affinity_balanced );
# if OMP_40_ENABLED
__kmp_nested_proc_bind.bind_types[0] = proc_bind_intel;
# endif
buf = next;
- # endif
} else if (__kmp_match_str("disabled", buf, (const char **)&next)) {
set_type( affinity_disabled );
# if OMP_40_ENABLED
@@ -2309,7 +2307,6 @@ __kmp_parse_affinity_env( char const * name, char const * value,
KMP_WARNING( AffManyParamsForLogic, name, number[ 1 ] );
}; // if
} break;
- # if KMP_MIC
case affinity_balanced: {
if ( count > 0 ) {
*out_compact = number[ 0 ];
@@ -2318,16 +2315,20 @@ __kmp_parse_affinity_env( char const * name, char const * value,
*out_offset = number[ 1 ];
}; // if
// If granularity is neither thread nor core let it be default value=fine
- if( __kmp_affinity_gran != affinity_gran_default && __kmp_affinity_gran != affinity_gran_fine
- && __kmp_affinity_gran != affinity_gran_thread && __kmp_affinity_gran != affinity_gran_core ) {
+ if ( __kmp_affinity_gran == affinity_gran_default ) {
+ # if KMP_MIC
if( __kmp_affinity_verbose || __kmp_affinity_warnings ) {
KMP_WARNING( AffGranUsing, "KMP_AFFINITY", "fine" );
}
+ __kmp_affinity_gran = affinity_gran_fine;
+ # else
+ if( __kmp_affinity_verbose || __kmp_affinity_warnings ) {
+ KMP_WARNING( AffGranUsing, "KMP_AFFINITY", "core" );
+ }
- __kmp_affinity_gran = affinity_gran_fine;
+ __kmp_affinity_gran = affinity_gran_core;
+ # endif /* KMP_MIC */
}
} break;
- # endif
case affinity_scatter:
case affinity_compact: {
if ( count > 0 ) {
@@ -2468,12 +2469,10 @@ __kmp_stg_print_affinity( kmp_str_buf_t * buffer, char const * name, void * data
__kmp_str_buf_print( buffer, "%s=[%s],%s", "proclist",
__kmp_affinity_proclist, "explicit" );
break;
- # if KMP_MIC
case affinity_balanced:
__kmp_str_buf_print( buffer, "%s,%d,%d", "balanced",
__kmp_affinity_compact, __kmp_affinity_offset );
break;
- # endif
case affinity_disabled:
__kmp_str_buf_print( buffer, "%s", "disabled");
break;
@@ -3026,10 +3025,15 @@ __kmp_stg_parse_proc_bind( char const * name, char const * value, void * data )
__kmp_stg_parse_bool( name, value, & enabled );
if ( enabled ) {
//
- // OMP_PROC_BIND => granularity=core,scatter
+ // OMP_PROC_BIND => granularity=fine,scatter on MIC
+ // OMP_PROC_BIND => granularity=core,scatter elsewhere
//
__kmp_affinity_type = affinity_scatter;
+ # if KMP_MIC
+ __kmp_affinity_gran = affinity_gran_fine;
+ # else
__kmp_affinity_gran = affinity_gran_core;
+ # endif /* KMP_MIC */
}
else {
__kmp_affinity_type = affinity_none;
@@ -4987,9 +4991,7 @@ __kmp_env_initialize( char const * string ) {
&& ( FIND( aff_str, "compact" ) == NULL )
&& ( FIND( aff_str, "scatter" ) == NULL )
&& ( FIND( aff_str, "explicit" ) == NULL )
- # if KMP_MIC
&& ( FIND( aff_str, "balanced" ) == NULL )
- # endif
&& ( FIND( aff_str, "disabled" ) == NULL ) ) {
__kmp_affinity_notype = __kmp_stg_find( "KMP_AFFINITY" );
}
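Taken together, the parser and printer hunks above make "balanced" a first-class type string on every architecture: __kmp_parse_affinity_env() matches it alongside "disabled", the optional trailing numbers still go to __kmp_affinity_compact and __kmp_affinity_offset, and __kmp_env_initialize() now includes "balanced" when checking whether KMP_AFFINITY named a type at all. A couple of example settings (illustrative values only, following the usual KMP_AFFINITY modifier syntax):

KMP_AFFINITY=balanced
KMP_AFFINITY=verbose,granularity=core,balanced,0,0

On a machine with more than one package a balanced request is still rejected with the AffBalancedNotAvail warning shown in the __kmp_aux_affinity_initialize() hunk near the top.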