gru: change context load and unload
Remove "static" from the functions for loading/unloading GRU contexts. These functions will be called from other GRU files. Fix bug in unlocking gru context. Signed-off-by: Jack Steiner <steiner@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 364b76df80
commit d57c82b107

3 changed files with 11 additions and 7 deletions
drivers/misc/sgi-gru/grufault.c

@@ -558,8 +558,8 @@ int gru_handle_user_call_os(unsigned long cb)
 	 * CCH may contain stale data if ts_force_cch_reload is set.
 	 */
 	if (gts->ts_gru && gts->ts_force_cch_reload) {
-		gru_update_cch(gts, 0);
 		gts->ts_force_cch_reload = 0;
+		gru_update_cch(gts, 0);
 	}
 
 	ret = -EAGAIN;
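The hunk above reorders the reload path in gru_handle_user_call_os(): ts_force_cch_reload is now cleared before gru_update_cch() rewrites the CCH rather than after. A minimal sketch of that "clear the request flag, then service it" ordering, with hypothetical names (reload_requested, reload_hw_state) rather than the driver's own, under the assumption that the point is not to lose a request raised while the reload is in progress:

	#include <stdbool.h>

	static bool reload_requested;		/* hypothetical request flag */

	static void reload_hw_state(void)
	{
		/* rewrite the hardware context from the in-memory copy */
	}

	static void maybe_reload(void)
	{
		if (reload_requested) {
			reload_requested = false;	/* clear first ...              */
			reload_hw_state();		/* ... then service, so a new
							 * request raised during the
							 * reload stays pending        */
		}
	}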
@@ -644,7 +644,7 @@ static int gru_unload_all_contexts(void)
 		if (gts && mutex_trylock(&gts->ts_ctxlock)) {
 			spin_unlock(&gru->gs_lock);
 			gru_unload_context(gts, 1);
-			gru_unlock_gts(gts);
+			mutex_unlock(&gts->ts_ctxlock);
 			spin_lock(&gru->gs_lock);
 		}
 	}
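The second grufault.c hunk is the unlocking fix from the commit message: the mutex taken with mutex_trylock(&gts->ts_ctxlock) is now released with a matching mutex_unlock() instead of gru_unlock_gts(). A minimal, self-contained sketch of the trylock/unlock pairing around a dropped spinlock, using hypothetical stand-in structures rather than the driver's own:

	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	struct ctx  { struct mutex lock; };	/* stand-in for the per-thread context */
	struct unit { spinlock_t list_lock; };	/* stand-in for the per-GRU state      */

	static void unload_one(struct unit *u, struct ctx *c)
	{
		/* mutex_trylock() never sleeps, so it is safe under the spinlock */
		if (mutex_trylock(&c->lock)) {
			spin_unlock(&u->list_lock);	/* drop spinlock before sleeping work */
			/* ... unload the context ... */
			mutex_unlock(&c->lock);		/* release exactly what trylock took  */
			spin_lock(&u->list_lock);	/* reacquire for the caller's loop    */
		}
	}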
drivers/misc/sgi-gru/grumain.c

@@ -533,7 +533,7 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
  * Load a GRU context by copying it from the thread data structure in memory
  * to the GRU.
  */
-static void gru_load_context(struct gru_thread_state *gts)
+void gru_load_context(struct gru_thread_state *gts)
 {
 	struct gru_state *gru = gts->ts_gru;
 	struct gru_context_configuration_handle *cch;
@@ -600,8 +600,8 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload)
 			gts->ts_tlb_int_select = gru_cpu_fault_map_id();
 			cch->tlb_int_select = gru_cpu_fault_map_id();
 			cch->tfm_fault_bit_enable =
 			    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
 			     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
 		} else {
 			for (i = 0; i < 8; i++)
 				cch->asid[i] = 0;
@@ -645,7 +645,7 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
 #define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
 				 ((g)+1) : &(b)->bs_grus[0])
 
-static void gru_steal_context(struct gru_thread_state *gts, int blade_id)
+void gru_steal_context(struct gru_thread_state *gts, int blade_id)
 {
 	struct gru_blade_state *blade;
 	struct gru_state *gru, *gru0;
@@ -711,7 +711,7 @@ static void gru_steal_context(struct gru_thread_state *gts, int blade_id)
 /*
  * Scan the GRUs on the local blade & assign a GRU context.
  */
-static struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
+struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
 						int blade)
 {
 	struct gru_state *gru, *grux;
drivers/misc/sgi-gru/grutables.h

@@ -611,6 +611,10 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
 			*vma, int tsid);
 extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
 			*vma, int tsid);
+extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
+		int blade);
+extern void gru_load_context(struct gru_thread_state *gts);
+extern void gru_steal_context(struct gru_thread_state *gts, int blade_id);
 extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
 extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
 extern void gts_drop(struct gru_thread_state *gts);
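With the static qualifiers removed in grumain.c and the declarations added to grutables.h above, other files in the driver can now call these helpers directly. A hypothetical caller, sketched only to show the exported signatures; the locking and retry behaviour of the real call sites is omitted:

	/* Illustrative only; assumes the caller already holds whatever locks the driver requires. */
	static void attach_context(struct gru_thread_state *gts, int blade_id)
	{
		if (!gts->ts_gru) {
			if (!gru_assign_gru_context(gts, blade_id))
				gru_steal_context(gts, blade_id);	/* no free context: steal one     */
			if (gts->ts_gru)
				gru_load_context(gts);			/* copy saved state into the GRU  */
		}
	}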