torchsparse.nn.functional

relu(input: SparseTensor, inplace: bool = True) → SparseTensor
leaky_relu(input: SparseTensor, negative_slope: float = 0.1, inplace: bool = True) → SparseTensor
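Both activations apply the usual element-wise nonlinearity to the features of a SparseTensor and return a SparseTensor with the same coordinates. A minimal usage sketch follows; the SparseTensor construction (random integer coordinates with a batch column) is illustrative only, since the exact coordinate column order depends on the torchsparse version.

```python
import torch
from torchsparse import SparseTensor
import torchsparse.nn.functional as F

# Toy input: 1000 voxels with 16-dim features. The (N, 4) integer coordinate
# layout (batch index plus x/y/z) is an assumption for illustration.
coords = torch.randint(0, 32, (1000, 4), dtype=torch.int32)
feats = torch.randn(1000, 16)
x = SparseTensor(feats, coords)

y = F.relu(x, inplace=False)                  # ReLU on x's features, same sparsity pattern
z = F.leaky_relu(x, negative_slope=0.1)       # leaky variant
```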
build_kernel_map(_coords: Tensor, kernel_size: int | Tuple[int, ...] = 2, stride: int | Tuple[int, ...] = 2, tensor_stride: int | Tuple[int, ...] = 1, mode='hashmap') → Tensor
conv3d(input: SparseTensor, weight: Tensor, kernel_size: int | List[int] | Tuple[int, ...], bias: Tensor | None = None, stride: int | List[int] | Tuple[int, ...] = 1, dilation: int | Tuple[int, ...] = 1, transposed: bool = False, epsilon: float = 0.0, mm_thresh: int = 0, kmap_mode: str = 'hashmap') → SparseTensor
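conv3d is the functional form of sparse 3-D convolution; kmap_mode selects how the kernel map (the input/output neighbor pairs) is built, matching the mode argument of build_kernel_map. The sketch below assumes the weight is laid out as (kernel_volume, in_channels, out_channels), which should be verified against the installed torchsparse version; in most code the torchsparse.nn.Conv3d module is used instead and manages the weight itself.

```python
import torch
import torchsparse.nn.functional as F

# `x` is the SparseTensor from the previous snippet (16 input channels).
# Weight layout (kernel_volume, C_in, C_out) is an assumption.
kernel_size = 3
weight = torch.randn(kernel_size ** 3, 16, 32)

out = F.conv3d(
    x, weight,
    kernel_size=kernel_size,
    stride=1,
    kmap_mode='hashmap',   # kernel-map construction strategy (cf. build_kernel_map)
)
# `out` is a SparseTensor with 32-channel features.
```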
spcount(coords: Tensor, num: Tensor) → Tensor
spcrop(input: SparseTensor, coords_min: Tuple[int, ...] | None = None, coords_max: Tuple[int, ...] | None = None) → SparseTensor
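spcrop keeps only the points of a SparseTensor whose coordinates fall inside the given per-axis bounds. A minimal sketch, with the bounds chosen arbitrarily (whether the upper bound is inclusive is not specified here):

```python
import torchsparse.nn.functional as F

# Keep only the voxels of `x` inside a 16x16x16 box near the origin.
cropped = F.spcrop(x, coords_min=(0, 0, 0), coords_max=(16, 16, 16))
```

spcount is used together with the hashing and voxelization functions; see the sketch at the end of this page.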
spdevoxelize(feats: Tensor, coords: Tensor, weights: Tensor) → Tensor
calc_ti_weights(coords: Tensor, idx_query: Tensor, scale: float = 1) → Tensor
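spdevoxelize scatters voxel features back to continuous point positions using trilinear interpolation, and calc_ti_weights computes the interpolation weights from the points' fractional coordinates and the indices of their neighboring voxels. The sketch below follows the SPVCNN-style voxel-to-point utility; the tensor orientations, the (8, N) neighbor-index layout, and the -1 convention for missing neighbors are assumptions to double-check against your version.

```python
import torch
import torchsparse.nn.functional as F

# Hypothetical stand-ins (layouts are assumptions): in practice `idx_query` is
# built with sphash/sphashquery over the 8 voxels surrounding each point,
# with -1 marking neighbors that do not exist.
num_points, num_voxels, channels = 1000, 200, 16
point_coords = torch.rand(num_points, 4) * 32               # float point coordinates
idx_query = torch.randint(-1, num_voxels, (8, num_points))  # (8, N) neighbor indices
voxel_feats = torch.randn(num_voxels, channels)

# Trilinear weights from the points' fractional positions, then gather and
# blend the neighboring voxel features for every point.
weights = F.calc_ti_weights(point_coords, idx_query, scale=1)
point_feats = F.spdevoxelize(
    voxel_feats,
    idx_query.transpose(0, 1).contiguous(),
    weights.transpose(0, 1).contiguous(),
)
```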
spdownsample(coords: Tensor, stride: int | Tuple[int, ...] = 2, kernel_size: int | Tuple[int, ...] = 2, tensor_stride: int | Tuple[int, ...] = 1) → Tensor
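spdownsample produces the coordinates of the output sites of a strided sparse convolution or pooling step. A minimal call mirroring the documented defaults (the input `coords` layout is the same integer coordinate tensor assumed above):

```python
import torchsparse.nn.functional as F

# Coordinates of the stride-2 output sites for a tensor currently at stride 1.
out_coords = F.spdownsample(coords, stride=2, kernel_size=2, tensor_stride=1)
```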
sphash(coords: Tensor, offsets: Tensor | None = None) → Tensor
global_avg_pool(inputs: SparseTensor) → Tensor
global_max_pool(inputs: SparseTensor) → Tensor
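Both global pools reduce over all points belonging to each sample in the batch and return a dense torch.Tensor, one feature vector per sample (the (batch_size, channels) output shape is inferred from the signatures, so treat it as an assumption).

```python
import torchsparse.nn.functional as F

pooled_avg = F.global_avg_pool(x)   # dense tensor, one averaged feature vector per sample
pooled_max = F.global_max_pool(x)   # same shape, per-channel maximum instead of mean
```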
sphashquery(queries: Tensor, references: Tensor) → Tensor
spvoxelize(feats: Tensor, coords: Tensor, counts: Tensor) → Tensor
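sphash, sphashquery, spcount, and spvoxelize are the building blocks of voxelization: hash the quantized coordinates, look up each point's voxel among the unique hashes, count how many points fall into each voxel, and average their features. The sketch below follows the SPVCNN-style initial voxelization; the coordinate layout and dtypes are assumptions, and note that spcount's `num` argument is annotated as a Tensor above while older releases accepted a plain int, so the exact expected type may vary by version.

```python
import torch
import torchsparse.nn.functional as F

# `coords`: float (N, 4) point coordinates, `feats`: (N, C) per-point features
# (layout is an assumption). Quantize to integer voxel coordinates, then hash.
voxel_coords = torch.floor(coords).int()
pt_hash = F.sphash(voxel_coords)            # one hash value per point
unique_hash = torch.unique(pt_hash)         # one hash per occupied voxel

# For each point, the index of its voxel among the unique hashes.
idx_query = F.sphashquery(pt_hash, unique_hash)

# Count points per voxel, then average the point features inside each voxel.
# (spvoxelize's documented `coords` argument receives this per-point voxel
# index in the SPVCNN reference code.)
counts = F.spcount(idx_query.int(), len(unique_hash))
voxel_feats = F.spvoxelize(feats, idx_query, counts)   # (num_voxels, C)
```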