Idle gpu — occupy every visible GPU with endless random matrix multiplications.
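The script below spawns one worker process per visible GPU; each worker loops forever over torch.matmul on a pair of random matrices kept resident on its device, so every GPU reports steady utilization until the script is killed.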
import numpy as np
import torch
import torch.multiprocessing as mp

# A CUDA context does not survive fork(), so worker processes must be
# started with 'forkserver' (or 'spawn') before anything touches the GPU.
mp.set_start_method('forkserver', force=True)
# mp.set_start_method('spawn')
def random_size():
    """Pick a random pair of matmul-compatible shapes.

    Returns
    -------
    a_size, b_size : tuple
        Shapes (m, k) and (k, n), with each dimension drawn from
        {256, 512, 1024, 2048, 4096}.
    """
    choice = [256, 512, 1024, 2048, 4096]
    nums = np.random.choice(choice, size=3)
    a_size = (nums[0], nums[1])
    b_size = (nums[1], nums[2])
    return a_size, b_size
def matmul(a, b):
    # Busy loop: keep enqueueing matmul kernels so the GPU never idles.
    while True:
        torch.matmul(a, b)
if __name__ == '__main__':
    gpu_num = torch.cuda.device_count()
    print(f"{gpu_num} GPU(s) available")

    # One random pair of shapes per GPU.
    size_list = [random_size() for i in range(gpu_num)]

    # Place one pair of random matrices on each GPU.
    a = [torch.rand(size_list[i][0]).cuda(i) for i in range(gpu_num)]
    b = [torch.rand(size_list[i][1]).cuda(i) for i in range(gpu_num)]

    # One worker process per GPU. error_callback surfaces exceptions that
    # apply_async would otherwise swallow silently.
    p = mp.Pool(gpu_num)
    for i in range(gpu_num):
        p.apply_async(matmul, args=(a[i], b[i]), error_callback=print)
    p.close()
    p.join()
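To occupy only a subset of the machine's GPUs, restrict device visibility before launch. A hedged example, assuming the script is saved as idle_gpu.py (the gist does not name the file):

CUDA_VISIBLE_DEVICES=0,2 python idle_gpu.py

The CUDA runtime honors CUDA_VISIBLE_DEVICES, so torch.cuda.device_count() then reports only the listed devices and the pool starts one worker per visible GPU. Stop the script with Ctrl-C; the workers never return on their own.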