drm/amdgpu: make amdgpu_mn_get wait for mmap_sem killable
amdgpu_mn_get, which is called during the ioctl path, relies on taking mmap_sem for write. If the waiting task gets killed by the OOM killer, it would block the oom_reaper from asynchronous address space reclaim and reduce the chances of timely OOM resolution. Wait for the lock in killable mode and return with -EINTR if the task got killed while waiting.

[arnd@arndb.de: use ERR_PTR() to return from amdgpu_mn_get]
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Christian König <christian.koenig@amd.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2267c2999b
commit b5637051f1

1 changed file with 4 additions and 1 deletion
@@ -232,7 +232,10 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	int r;
 
 	mutex_lock(&adev->mn_lock);
-	down_write(&mm->mmap_sem);
+	if (down_write_killable(&mm->mmap_sem)) {
+		mutex_unlock(&adev->mn_lock);
+		return ERR_PTR(-EINTR);
+	}
 
 	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
 		if (rmn->mm == mm)
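For illustration, a minimal sketch of how a caller of amdgpu_mn_get() is expected to handle the error-pointer return introduced by this patch, using the kernel's standard IS_ERR()/PTR_ERR() helpers. The function name example_register_bo() and the way adev is obtained are hypothetical; this is not the actual in-tree caller.

/*
 * Hedged sketch, not the real caller: check for the ERR_PTR(-EINTR)
 * return added by this patch before using the returned amdgpu_mn.
 */
#include <linux/err.h>

static int example_register_bo(struct amdgpu_device *adev)
{
	struct amdgpu_mn *rmn;

	rmn = amdgpu_mn_get(adev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);	/* -EINTR if the task was killed */

	/* ... proceed to register the buffer with the MMU notifier ... */
	return 0;
}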