gru: support cch_allocate for kernel threads
Change the interface to cch_allocate so that it can be used to allocate GRU contexts for kernel threads. Kernel threads use the GRU in unmapped mode and do not require ASIDs for the GRU TLB.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 6e9100741c (parent d57c82b107)

4 changed files with 17 additions and 19 deletions
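In short, the prototype shrinks from five parameters to one: callers now populate the handle fields themselves before issuing the allocate op. Before/after, taken from the gruhandles.h hunk below:

	/* before: cch_allocate() filled in the ASIDs and resource maps itself */
	int cch_allocate(struct gru_context_configuration_handle *cch,
			 int asidval, int sizeavail, unsigned long cbrmap,
			 unsigned long dsrmap);

	/* after: the caller fills asid[], sizeavail[], and the cbr/dsr maps first */
	int cch_allocate(struct gru_context_configuration_handle *cch);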
drivers/misc/sgi-gru/gruhandles.c
@@ -72,18 +72,8 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
 	return status;
 }
 
-int cch_allocate(struct gru_context_configuration_handle *cch,
-		 int asidval, int sizeavail, unsigned long cbrmap,
-		 unsigned long dsrmap)
+int cch_allocate(struct gru_context_configuration_handle *cch)
 {
-	int i;
-
-	for (i = 0; i < 8; i++) {
-		cch->asid[i] = (asidval++);
-		cch->sizeavail[i] = sizeavail;
-	}
-	cch->dsr_allocation_map = dsrmap;
-	cch->cbr_allocation_map = cbrmap;
 	cch->opc = CCHOP_ALLOCATE;
 	start_instruction(cch);
 	return wait_instruction_complete(cch, cchop_allocate);
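With the field setup hoisted out, cch_allocate now only writes the opcode and waits for completion. As a minimal sketch of what callers are responsible for, the caller-initialized CCH fields are roughly these (an illustrative stand-in; the real gru_context_configuration_handle is a hardware-defined layout in gruhandles.h):

	/* Stand-in listing of the caller-initialized CCH fields; names come
	 * from the diffs, but this struct itself is not in the driver. */
	struct cch_caller_fields_sketch {
		unsigned long	cbr_allocation_map;	/* control block resources assigned */
		unsigned long	dsr_allocation_map;	/* data segment resources assigned */
		unsigned int	unmap_enable;		/* 1 = unmapped (kernel-thread) mode */
		unsigned int	asid[8];		/* GRU TLB ASIDs; unused when unmapped */
		unsigned int	sizeavail[8];		/* page-size availability per ASID slot */
	};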
drivers/misc/sgi-gru/gruhandles.h
@@ -480,9 +480,7 @@ enum gru_cbr_state {
 /* minimum TLB purge count to ensure a full purge */
 #define GRUMAXINVAL		1024UL
 
-int cch_allocate(struct gru_context_configuration_handle *cch,
-	int asidval, int sizeavail, unsigned long cbrmap, unsigned long dsrmap);
-
+int cch_allocate(struct gru_context_configuration_handle *cch);
 int cch_start(struct gru_context_configuration_handle *cch);
 int cch_interrupt(struct gru_context_configuration_handle *cch);
 int cch_deallocate(struct gru_context_configuration_handle *cch);
drivers/misc/sgi-gru/grukservices.c
@@ -672,7 +672,10 @@ int gru_kservices_init(struct gru_state *gru)
 	cch->tlb_int_enable = 0;
 	cch->tfm_done_bit_enable = 0;
 	cch->unmap_enable = 1;
-	err = cch_allocate(cch, 0, 0, cbr_map, dsr_map);
+	cch->dsr_allocation_map = dsr_map;
+	cch->cbr_allocation_map = cbr_map;
+
+	err = cch_allocate(cch);
 	if (err) {
 		gru_dbg(grudev,
 			"Unable to allocate kernel CCH: gid %d, err %d\n",
drivers/misc/sgi-gru/grumain.c
@@ -537,13 +537,12 @@ void gru_load_context(struct gru_thread_state *gts)
 {
 	struct gru_state *gru = gts->ts_gru;
 	struct gru_context_configuration_handle *cch;
-	int err, asid, ctxnum = gts->ts_ctxnum;
+	int i, err, asid, ctxnum = gts->ts_ctxnum;
 
 	gru_dbg(grudev, "gts %p\n", gts);
 	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
 
 	lock_cch_handle(cch);
-	asid = gru_load_mm_tracker(gru, gts);
 	cch->tfm_fault_bit_enable =
 		(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
 		 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
@@ -553,8 +552,16 @@ void gru_load_context(struct gru_thread_state *gts)
 		cch->tlb_int_select = gts->ts_tlb_int_select;
 	}
 	cch->tfm_done_bit_enable = 0;
-	err = cch_allocate(cch, asid, gts->ts_sizeavail, gts->ts_cbr_map,
-			gts->ts_dsr_map);
+	cch->dsr_allocation_map = gts->ts_dsr_map;
+	cch->cbr_allocation_map = gts->ts_cbr_map;
+	asid = gru_load_mm_tracker(gru, gts);
+	cch->unmap_enable = 0;
+	for (i = 0; i < 8; i++) {
+		cch->asid[i] = asid + i;
+		cch->sizeavail[i] = gts->ts_sizeavail;
+	}
+
+	err = cch_allocate(cch);
 	if (err) {
 		gru_dbg(grudev,
 			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
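Taken together, the two call sites now follow one convention: fill the handle, then allocate. A condensed sketch (assignments lifted from the hunks above; surrounding locking and error handling elided):

	/* Kernel-thread context (grukservices.c): unmapped mode, no ASIDs. */
	cch->unmap_enable = 1;
	cch->dsr_allocation_map = dsr_map;
	cch->cbr_allocation_map = cbr_map;
	err = cch_allocate(cch);

	/* User context (grumain.c): mapped mode, eight sequential ASIDs. */
	cch->unmap_enable = 0;
	cch->dsr_allocation_map = gts->ts_dsr_map;
	cch->cbr_allocation_map = gts->ts_cbr_map;
	asid = gru_load_mm_tracker(gru, gts);
	for (i = 0; i < 8; i++) {
		cch->asid[i] = asid + i;
		cch->sizeavail[i] = gts->ts_sizeavail;
	}
	err = cch_allocate(cch);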