Table 1.
# Usage 1: molgrid functions taking Grid objects can be passed Torch tensors directly,
# with conversions managed internally
tensor = torch.zeros(tensor_shape, dtype=torch.float32, device='cuda')
molgrid.gmaker.forward(batch, tensor)
# Usage 2: construct Grid as a view over a Torch tensor with provided helper function
tensor = torch.zeros((2,2), dtype=torch.float32, device='cuda')
grid = molgrid.tensor_as_grid(tensor)  # dimensions and data location are inferred
# alternatively, construct Grid view over Torch tensor directly
grid = molgrid.Grid2fCUDA(tensor)
# Usage 3: copy ManagedGrid data to NumPy array
# first, construct a ManagedGrid
mgrid = molgrid.MGrid1f(batch_size)
# copy to GPU and do work on it there
mgrid.gpu()
# (do work)
# copy ManagedGrid data to a NumPy array with helper function;
# this copies data back to the CPU if necessary
array1 = mgrid.tonumpy()
# alternatively, construct NumPy array with a copy of ManagedGrid CPU data;
# must sync to CPU first
mgrid.cpu()
array2 = np.array(mgrid)
# Usage 4: construct Grid from NumPy array
array3 = np.zeros((2,2), dtype=np.float32)  # must match source and destination dtypes
tensor = molgrid.Grid2f(array3)