import math
import numpy as np
from numba import jit, prange, cuda, float32
# https://github.com/talboger/fastdist
@jit(nopython=True, fastmath=True)
def cosine(u, v, w=None):
    """
    :purpose:
    Computes the cosine similarity between two 1D arrays
    Unlike scipy's cosine distance, this returns similarity, which is 1 - distance
    :params:
    u, v : input arrays, both of shape (n,)
    w : weights at each index of u and v. array of shape (n,)
        if no w is set, it is initialized as an array of ones
        such that it will have no impact on the output
    :returns:
    cosine : float, the cosine similarity between u and v
    :example:
    >>> from fastdist import fastdist
    >>> import numpy as np
    >>> u, v, w = np.random.RandomState(seed=0).rand(10000, 3).T
    >>> fastdist.cosine(u, v, w)
    0.7495065944399267
    """
    n = len(u)
    # default the weights to ones so they have no effect, as the docstring promises
    if w is None:
        w = np.ones(n)
    num = 0
    u_norm, v_norm = 0, 0
    for i in range(n):
        num += u[i] * v[i] * w[i]
        u_norm += abs(u[i]) ** 2 * w[i]
        v_norm += abs(v[i]) ** 2 * w[i]
    denom = (u_norm * v_norm) ** (1 / 2)
    return num / denom
@jit(nopython=True, fastmath=True)
def cosine_vector_to_matrix(u, m):
    """
    :purpose:
    Computes the cosine similarity between a 1D array and rows of a matrix
    :params:
    u : input vector of shape (n,)
    m : input matrix of shape (m, n)
    :returns:
    cosine vector : np.array, of shape (m,) vector containing cosine similarity between u
                    and the rows of m
    :example:
    >>> from fastdist import fastdist
    >>> import numpy as np
    >>> u = np.random.RandomState(seed=0).rand(10)
    >>> m = np.random.RandomState(seed=0).rand(100, 10)
    >>> fastdist.cosine_vector_to_matrix(u, m)
    (returns an array of shape (100,))
    """
    norm = 0
    for i in range(len(u)):
        norm += abs(u[i]) ** 2
    u = u / norm ** (1 / 2)
    # normalize the rows into a copy so the caller's matrix is not modified in place
    m = m.copy()
    for i in range(m.shape[0]):
        norm = 0
        for j in range(len(m[i])):
            norm += abs(m[i][j]) ** 2
        m[i] = m[i] / norm ** (1 / 2)
    return np.dot(u, m.T)
@jit(nopython=True, fastmath=True)
def cosine_matrix_to_matrix(a, b):
    """
    :purpose:
    Computes the cosine similarity between the rows of two matrices
    :params:
    a, b : input matrices of shape (m, n) and (k, n)
           the matrices must share a common dimension at index 1
    :returns:
    cosine matrix : np.array, an (m, k) array of the cosine similarity
                    between the rows of a and b
    :example:
    >>> from fastdist import fastdist
    >>> import numpy as np
    >>> a = np.random.RandomState(seed=0).rand(10, 50)
    >>> b = np.random.RandomState(seed=0).rand(100, 50)
    >>> fastdist.cosine_matrix_to_matrix(a, b)
    (returns an array of shape (10, 100))
    """
    # normalize the rows into copies so the callers' matrices are not modified in place
    a = a.copy()
    b = b.copy()
    for i in range(a.shape[0]):
        norm = 0
        for j in range(len(a[i])):
            norm += abs(a[i][j]) ** 2
        a[i] = a[i] / norm ** (1 / 2)
    for i in range(b.shape[0]):
        norm = 0
        for j in range(len(b[i])):
            norm += abs(b[i][j]) ** 2
        b[i] = b[i] / norm ** (1 / 2)
    return np.dot(a, b.T)
@jit(nopython=True, fastmath=True)
def euclidean(u, v, w=None):
    """
    :purpose:
    Computes the Euclidean distance between two 1D arrays
    :params:
    u, v : input arrays, both of shape (n,)
    w : weights at each index of u and v. array of shape (n,)
        if no w is set, it is initialized as an array of ones
        such that it will have no impact on the output
    :returns:
    euclidean : float, the Euclidean distance between u and v
    :example:
    >>> from fastdist import fastdist
    >>> import numpy as np
    >>> u, v, w = np.random.RandomState(seed=0).rand(10000, 3).T
    >>> fastdist.euclidean(u, v, w)
    28.822558591834163
    """
    n = len(u)
    # accept the weight vector described in the docstring; default to ones
    if w is None:
        w = np.ones(n)
    dist = 0
    for i in range(n):
        dist += abs(u[i] - v[i]) ** 2 * w[i]
    return dist ** (1 / 2)
@jit(nopython=True, parallel=True, fastmath=True)
def euclidean_vector_to_matrix_distance(u, m):
    """
    :purpose:
    Computes the Euclidean distance between a vector and the rows of a matrix
    :params:
    u : input vector of shape (n,)
    m : input matrix of shape (m, n)
    :returns:
    distance vector : np.array, of shape (m,) vector containing the Euclidean distance
                      between u and the rows of m
    :example:
    >>> from fastdist import fastdist
    >>> import numpy as np
    >>> u = np.random.RandomState(seed=0).rand(10)
    >>> m = np.random.RandomState(seed=0).rand(100, 10)
    >>> fastdist.euclidean_vector_to_matrix_distance(u, m)
    (returns an array of shape (100,))
    """
    n = m.shape[0]
    out = np.zeros(n, dtype=np.float32)
    # parallel=True on the decorator lets prange distribute the rows across threads
    for i in prange(n):
        dist = 0
        for l in range(len(u)):
            dist += abs(u[l] - m[i][l]) ** 2
        out[i] = dist ** (1 / 2)
    return out
@cuda.jit
def gpu_kernel_euclidean_vector_to_matrix_distance(u, m, u_dim0, m_dim0, out):
    # Thread id in a 1D block
    tx = cuda.threadIdx.x
    # Block id in a 1D grid
    ty = cuda.blockIdx.x
    # Block width, i.e. number of threads per block
    bw = cuda.blockDim.x
    # Compute flattened index inside the array
    pos = tx + ty * bw
    if pos < m_dim0:  # Check array boundaries
        dist = 0
        for l in range(u_dim0):
            d = abs(u[l] - m[pos][l])
            dist += d * d
        out[pos] = dist ** (1 / 2)
def euclidean_vector_to_matrix_distance_gpu(u, m):
    m_dim0 = m.shape[0]
    u_dim0 = u.shape[0]
    out = np.zeros(m_dim0, dtype=np.float32)
    threadsperblock = 16
    blockspergrid = (m_dim0 + (threadsperblock - 1)) // threadsperblock
    gpu_kernel_euclidean_vector_to_matrix_distance[blockspergrid, threadsperblock](u, m, u_dim0, m_dim0, out)
    return out
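# Usage sketch for the wrapper above (assumes a CUDA-capable device; the array
# shapes and the allclose comparison are illustrative, not part of the original API).
# Host arrays are passed straight to the kernel, so numba copies them to the
# device implicitly on every call.
# >>> u = np.random.RandomState(seed=0).rand(10).astype(np.float32)
# >>> m = np.random.RandomState(seed=1).rand(100, 10).astype(np.float32)
# >>> d_gpu = euclidean_vector_to_matrix_distance_gpu(u, m)
# >>> d_cpu = euclidean_vector_to_matrix_distance(u, m)
# >>> np.allclose(d_gpu, d_cpu, atol=1e-5)
# True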
# https://numba.readthedocs.io/en/stable/cuda/examples.html
@cuda.jit
def gpu_kernel_euclidean_matrix_to_matrix_distance_fast(A, B, C):
    # Shared-memory tiled kernel, adapted from the numba CUDA matmul example.
    # A has shape (M, n); B is expected in transposed layout, shape (n, K),
    # so that C[y, x] is the Euclidean distance between row y of A and column x of B.
    TPB = 16
    # Define an array in the shared memory
    # The size and type of the arrays must be known at compile time
    sA = cuda.shared.array(shape=(TPB, TPB), dtype=float32)
    sB = cuda.shared.array(shape=(TPB, TPB), dtype=float32)
    x, y = cuda.grid(2)
    tx = cuda.threadIdx.x
    ty = cuda.threadIdx.y
    bpg = cuda.gridDim.x  # blocks per grid
    # Each thread computes one element in the result matrix.
    # The sum of squared differences is chunked into partial sums over TPB-long tiles.
    tmp = float32(0.)
    for i in range(bpg):
        # Preload data into shared memory
        sA[ty, tx] = 0
        sB[ty, tx] = 0
        if y < A.shape[0] and (tx + i * TPB) < A.shape[1]:
            sA[ty, tx] = A[y, tx + i * TPB]
        if x < B.shape[1] and (ty + i * TPB) < B.shape[0]:
            sB[ty, tx] = B[ty + i * TPB, x]
        # Wait until all threads finish preloading
        cuda.syncthreads()
        # Accumulate the partial sum of squared differences from shared memory
        for j in range(TPB):
            d = abs(sA[ty, j] - sB[j, tx])
            tmp += d * d
        # Wait until all threads finish computing
        cuda.syncthreads()
    if y < C.shape[0] and x < C.shape[1]:
        C[y, x] = tmp ** (1 / 2)
def euclidean_matrix_to_matrix_distance_gpu_fast(u, m):
    # u has shape (M, n); m must already be transposed to shape (n, K) so it
    # matches the matmul-style indexing of the kernel above. The result is (M, K).
    u_dim0 = u.shape[0]
    m_dim1 = m.shape[1]
    # assert u.shape[1] == m.shape[0]
    out = np.zeros((u_dim0, m_dim1), dtype=np.float32)
    threadsperblock = (16, 16)
    # The grid must cover the output in y/x and the reduction dimension in x,
    # hence the max over both matrix extents.
    grid_y_max = max(u.shape[0], m.shape[0])
    grid_x_max = max(u.shape[1], m.shape[1])
    blockspergrid_x = math.ceil(grid_x_max / threadsperblock[0])
    blockspergrid_y = math.ceil(grid_y_max / threadsperblock[1])
    blockspergrid = (blockspergrid_x, blockspergrid_y)
    u_d = cuda.to_device(u)
    m_d = cuda.to_device(m)
    out_d = cuda.to_device(out)
    gpu_kernel_euclidean_matrix_to_matrix_distance_fast[blockspergrid, threadsperblock](u_d, m_d, out_d)
    out = out_d.copy_to_host()
    return out
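# Usage sketch for the tiled GPU path above (illustrative shapes, assuming a
# CUDA device). The second matrix is passed transposed to shape (n, k) to match
# the kernel's matmul-style indexing, so the result has shape (a_rows, b_rows).
# >>> a = np.random.RandomState(seed=0).rand(10, 50).astype(np.float32)
# >>> b = np.random.RandomState(seed=1).rand(100, 50).astype(np.float32)
# >>> d_gpu = euclidean_matrix_to_matrix_distance_gpu_fast(a, np.ascontiguousarray(b.T))
# >>> d_gpu.shape
# (10, 100)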
@jit(cache=True, nopython=True, parallel=True, fastmath=True, boundscheck=False, nogil=True)
def euclidean_matrix_to_matrix_distance(a, b):
    """
    :purpose:
    Computes the Euclidean distance between the rows of two matrices
    :params:
    a, b : input matrices of shape (m, n) and (k, n)
           the matrices must share a common dimension at index 1
    :returns:
    distance matrix : np.array, an (m, k) array of the Euclidean distance
                      between the rows of a and b
    :example:
    >>> from fastdist import fastdist
    >>> import numpy as np
    >>> a = np.random.RandomState(seed=0).rand(10, 50)
    >>> b = np.random.RandomState(seed=0).rand(100, 50)
    >>> fastdist.euclidean_matrix_to_matrix_distance(a, b)
    (returns an array of shape (10, 100))
    """
    n, m = a.shape[0], b.shape[0]
    out = np.zeros((n, m), dtype=np.float32)
    for i in prange(n):
        for j in range(m):
            dist = 0
            for l in range(len(a[i])):
                dist += abs(a[i][l] - b[j][l]) ** 2
            out[i][j] = dist ** (1 / 2)
    return out
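# A small self-check sketch (not part of the original module): it compares the
# JIT-compiled CPU implementation against a plain NumPy broadcasting reference.
if __name__ == "__main__":
    a = np.random.RandomState(seed=0).rand(10, 50)
    b = np.random.RandomState(seed=1).rand(100, 50)
    fast = euclidean_matrix_to_matrix_distance(a, b)
    # pairwise Euclidean distances via broadcasting, used only as a reference
    ref = np.sqrt(((a[:, None, :] - b[None, :, :]) ** 2).sum(axis=-1))
    print("max abs difference vs NumPy reference:", np.abs(fast - ref).max())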