Dataset Viewer (First 5GB)
Auto-converted to Parquet
Columns:
- query: string (lengths 33 to 521)
- document: string (lengths 8 to 49.6k)
- metadata: dict
- negatives: sequence (lengths 5 to 101)
- negative_scores: sequence (lengths 5 to 101)
- document_score: string (lengths 3 to 10)
- document_rank: string (102 distinct values)
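Each row is a retrieval training triplet: one query, one positive document, and a list of mined negatives with their scores. As a minimal loading sketch (the repository id below is a placeholder, not the real dataset path), the rows can be streamed with the Hugging Face `datasets` library:

```python
from datasets import load_dataset

# Placeholder repo id; substitute the actual dataset path.
ds = load_dataset("org/code-retrieval-triplets", split="train", streaming=True)

for row in ds.take(2):
    print("query:", row["query"])
    print("document:", row["document"][:60], "...")
    print(len(row["negatives"]), "negatives |",
          "top negative score:", row["negative_scores"][0], "|",
          "document_score:", row["document_score"], "|",
          "document_rank:", row["document_rank"])
```

Streaming avoids materializing the full Parquet shards up front, which is convenient given the long documents and the roughly 100-item negative lists shown in the rows below.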
Example row 1
query: Compute the matrix-vector product y = Cu, where C is a circulant matrix. All matrices are real.
document: def circulant_multiplication(u, a): return real(ifft(fft(a)*fft(u)))
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
negatives: 100 mined hard-negative Python snippets, mostly related matrix and vector routines (e.g. `covar`, `__matmul__`, `matrix_vector_prod`, `factor_circulant_multiplication`, `toeplitz_multiplication`, `naive_matrix_vector_dot`, `kronecker_prod`); the full list is omitted here for readability.
negative_scores: 100 values, ranging from 0.650418 down to 0.5412295.
document_score: 0.6389226
document_rank: 3
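The document field above relies on the fact that a circulant matrix is diagonalized by the DFT, so y = Cu reduces to an elementwise product in Fourier space. A self-contained sketch with imports and a brute-force check (NumPy only; the variable names and the test are illustrative):

```python
import numpy as np

def circulant_multiplication(u, a):
    """y = C @ u, where C is the circulant matrix whose first column is a.

    C is diagonalized by the DFT, so the product reduces to an
    elementwise multiply in Fourier space.
    """
    return np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(u)))

# Brute-force check against an explicitly built circulant matrix.
rng = np.random.default_rng(0)
a = rng.standard_normal(8)
u = rng.standard_normal(8)
C = np.column_stack([np.roll(a, k) for k in range(8)])
assert np.allclose(C @ u, circulant_multiplication(u, a))
```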
Example row 2
query: Compute the matrix-vector product y = Tu, where T is a Toeplitz matrix. All matrices are real.
document:
def toeplitz_multiplication(u, c, r=None):
    n = len(u)
    if r is None:
        r = c
    u1 = zeros((2*n))
    u1[0:n] = u
    c = np.concatenate((c, [0], r[-1:0:-1]))
    y1 = circulant_multiplication(u1, c)
    return y1[0:n]
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
negatives: 100 mined hard-negative Python snippets (e.g. `matrix_vector_prod`, `matmul`, `toeplitz_inverse_multiplication`, `bd_toeplitz_inverse_multiplication`, `naive_matrix_vector_dot`, `dot_prod`); the full list is omitted here for readability.
negative_scores: 100 values, ranging from 0.7003199 down to 0.5499449.
0.63380134
5
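The `__mul__` snippet listed among the negatives just above assembles the product matrix entry by entry from row–column dot products. A minimal NumPy sketch of that same row-by-column product, shown here only for reference (the pure-Python `Matrix`/`dot_product` helpers it calls are not part of this record, so plain arrays are assumed):

import numpy as np

def matmul_rowcol(a, b):
    # a: (m, k), b: (k, n); each output entry is the dot product of a row of a
    # with a column of b, mirroring the __mul__ snippet before collapsing to a @ b
    m, n = a.shape[0], b.shape[1]
    out = np.empty((m, n))
    for i in range(m):
        for j in range(n):
            out[i, j] = np.dot(a[i, :], b[:, j])
    return out  # equal to a @ b up to floating-point rounding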
Read in labels from digitStruct.mat file to create a dict of image file name and corresponding labels
def read_labels(digitstruct_file):\n    labels = dict()\n    for dsObj in tqdm(yieldNextDigitStruct(digitstruct_file), ncols=50):\n        image_labels = []\n        for bbox in dsObj.bboxList:\n            image_labels.append(bbox.label)\n        labels[dsObj.name] = image_labels\n    return labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_imagenet_as_dict(self):\n real_file_path = os.path.realpath(self.map_file)\n if not os.path.exists(real_file_path):\n raise IOError(\"map file {} not exists\".format(self.map_file))\n\n label_dict = {}\n with open(real_file_path) as fp:\n line = fp.readline()\n while line:\n labels = line.split(\" \")\n label_dict[labels[1]] = labels[0]\n line = fp.readline()\n\n # get all the dir which are n02087046, n02094114, n02109525\n dir_paths = {}\n for item in label_dict:\n real_path = os.path.join(self.image_dir, label_dict[item])\n if not os.path.isdir(real_path):\n logger.warning(\"{} dir is not exist\".format(real_path))\n continue\n dir_paths[item] = real_path\n\n if not dir_paths:\n raise PathNotExistsError(\"not valid image dir in {}\".format(self.image_dir))\n\n # get the filename, label and image binary as a dict\n for label in dir_paths:\n for item in os.listdir(dir_paths[label]):\n file_name = os.path.join(dir_paths[label], item)\n if not item.endswith(\"JPEG\") and not item.endswith(\"jpg\"):\n logger.warning(\"{} file is not suffix with JPEG/jpg, skip it.\".format(file_name))\n continue\n data = {}\n data[\"file_name\"] = str(file_name)\n data[\"label\"] = int(label)\n\n # get the image data\n real_file_path = os.path.realpath(file_name)\n image_file = open(real_file_path, \"rb\")\n image_bytes = image_file.read()\n image_file.close()\n if not image_bytes:\n logger.warning(\"The image file: {} is invalid.\".format(file_name))\n continue\n data[\"image\"] = image_bytes\n yield data", "def extract_labels(filename, num_images):\n filepath = os.path.join(WORK_DIRECTORY, filename)\n print('Extracting', filepath)\n with open(filepath, mode='rb') as bytestream:\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def get_images_and_labels(tampered_path, authentic_path):\n tampered_dir = tampered_path\n authentic_dir = authentic_path\n images = {}\n for im in glob.glob(authentic_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 0\n for im in glob.glob(tampered_dir):\n images[im] = {}\n images[im]['mat'] = cv2.imread(im)\n images[im]['label'] = 1\n return images", "def load_labels(path, kmer=True, rg=True, clip=True, rna=True, go=True):\n\n labels = dict()\n if go: labels[\"X_GO\"] = gzip.open(os.path.join(path,\n \"matrix_GeneOntology.tab.gz\")).readline().split(\"\\t\")\n if kmer: labels[\"X_KMER\"] = gzip.open(os.path.join(path,\n \"matrix_RNAkmers.tab.gz\")).readline().split(\"\\t\")\n if rg: labels[\"X_RG\"] = gzip.open(os.path.join(path,\n \"matrix_RegionType.tab.gz\")).readline().split(\"\\t\")\n if clip: labels[\"X_CLIP\"] = gzip.open(os.path.join(path,\n \"matrix_Cobinding.tab.gz\")).readline().split(\"\\t\")\n if rna: labels[\"X_RNA\"] = gzip.open(os.path.join(path,\n \"matrix_RNAfold.tab.gz\")).readline().split(\"\\t\")\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels 
= np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def read_label_file(self, label_file_name = None): #completed\n if label_file_name is None:\n label_file_name = self.label_file_name\n try:\n label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)\n return label_data#[:,1], label_data[:,0]#in MATLAB format\n except IOError:\n print \"Unable to open \", label_file_name, \"... Exiting now\"\n sys.exit()", "def get_pet_labels(images_dir):\r\n \r\n # Creates a list of files in directory from pet images directory\r\n in_files = listdir(images_dir)\r\n \r\n # Process each of the files such that the created dictionary would have\r\n # key = filename and the value = picture label\r\n \r\n # Create an empty dictionary to hold pet labels\r\n petlabels_dic = dict()\r\n \r\n \r\n \r\n for idx in range(0, len(in_files), 1): \r\n if in_files[idx][0] != \".\":\r\n pet_image_name = in_files[idx].split(\"_\")\r\n # Check if the first character is uppercase letter. If it is, then lowercase that first character\r\n if pet_image_name[0].isupper() : \r\n pet_image_name = pet_image_name.lower()\r\n # Create a temporary label variable to hold pet label name\r\n pet_label = \" \"\r\n \r\n # Process each of the character strings(words) split by '_' in \r\n # the list pet_image_name\r\n for word in pet_image_name: \r\n if word.isalpha():\r\n pet_label += word + \" \"\r\n pet_label = pet_label.strip()\r\n if in_files[idx] not in petlabels_dic:\r\n petlabels_dic[in_files[idx]] = [pet_label]\r\n else: \r\n print(\" Warning: Duplicate files exist in dictionary\", in_files[idx])\r\n \r\n \r\n # Return dictionary of pet lables\r\n return(petlabels_dic)", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n return labels", "def extract_labels(filename, num_images):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def get_image_labels_mapping(images_fp, labels_fp):\n name_map = {}\n\n for f in images_fp():\n image_name = f[0]['file']\n vars = {k.upper():v for k,v in f[0].items() if k!='file' }\n label_name = labels_fp.get_matching(**vars)[0]['file']\n name_map[image_name] = label_name\n return name_map", "def retrieve_labels(file, label_indices):\n\n\t# Initialize numpy matrix to store the images\n\tlabels = np.zeros((len(label_indices), 10))\n\n\twith open(file, \"rb\") as f:\n\t\t# Intialize counters\n\t\ti = 0\n\t\tlabel_number = 0\n\n\t\t# Read first byte\n\t\tbyte = f.read(1)\n\n\t\t# Find each image in the data file\n\t\tfor label_index in label_indices:\n\t\t\t# Read in bytes until you arrive at the label\n\t\t\twhile byte and (i < (label_index + 8)):\n\t\t\t\tbyte = f.read(1)\n\t\t\t\ti += 1\n\n\t\t\t# Store label value in numpy array\n\t\t\tvalue = int.from_bytes(byte, \"big\")\n\t\t\tlabels[label_number] = np.zeros(10)\n\t\t\tlabels[label_number, value] = 1\n\n\t\t\t# Increment to next label\n\t\t\tlabel_number += 1\n\n\treturn labels", "def 
get_images_and_labels_nc():\n refs = get_ref_df()\n images = {}\n for _, data in refs.iterrows():\n if data['ProbeFileName'] in images:\n continue\n im = data['ProbeFileName']\n images[im] = 1 if data['IsTarget'] == 'Y' else 0\n return images", "def make_label_map(path, label_list):\r\n \r\n img = []\r\n for name in path:\r\n now = np.zeros((224,224))\r\n im = cv2.resize(cv2.imread(name), (224,224)).tolist()\r\n for y, i in enumerate(im):\r\n for x, j in enumerate(i):\r\n try:\r\n now[y, x] = label_list.index(j)\r\n\r\n except ValueError:\r\n now[y, x] = 0\r\n\r\n img.append(now)\r\n return img", "def parse_labelfile(path):\n with open(path, \"r\") as FILE:\n lines = FILE.readlines()\n\n\n labels = {x.split(\":\")[0]: x.split(\":\")[1] for x in lines[1:]}\n\n for key in labels:\n labels[key] = np.array(labels[key].split(\",\")).astype(\"uint8\")\n\n return labels", "def extract_labels(pdbbind_label_file):\n assert os.path.isfile(pdbbind_label_file)\n labels = {}\n with open(pdbbind_label_file) as f:\n content = f.readlines()\n for line in content:\n if line[0] == \"#\":\n continue\n line = line.split()\n # lines in the label file have format\n # PDB-code Resolution Release-Year -logKd Kd reference ligand-name\n #print line[0], line[3]\n labels[line[0]] = line[3]\n return labels", "def create_labelmapDict_patch(list_all_images, path_dataset):\n list_all_classes = []\n for idx, name_image_ in enumerate(list_all_images):\n _, tail = os.path.split(name_image_)\n temp_obj = []\n name_file_xml_all = os.path.join(path_dataset, 'LABELS', tail[0:-3] + 'xml')\n if os.path.exists(name_file_xml_all):\n with tf.gfile.GFile(name_file_xml_all, 'rb') as fid:\n xml_str = fid.read()\n xml = etree.fromstring(xml_str)\n data = tfrecord_util.recursive_parse_xml_to_dict(xml)['annotation']\n if 'object' in data:\n for obj in data['object']:\n name_in_obj_ = obj['name'].replace(' ', '').strip()\n if name_in_obj_ != 'INCOMPLETAS':\n list_all_classes.append(name_in_obj_)\n temp_obj.append(obj)\n # list_all_classes = unique_list(list_all_classes)\n list_all_classes = list(set(list_all_classes))\n list_all_classes.sort()\n list_all_classes.insert(0, 'background')\n labelmap_ = {el: k for k, el in enumerate(list_all_classes)}\n return labelmap_", "def read_idx_2_label():\n with open('../Data/imagenet_class_index.json') as f:\n dictionary = json.load(f)\n return dictionary", "def extract_labels(filename, num_images):\n gt_imgs = []\n for i in range(1, num_images+1):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n gt_patches = [img_crop(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE, 0, False) for i in range(num_images)]\n data = numpy.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = numpy.asarray([value_to_class(numpy.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(numpy.float32)", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n 
training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def load_label(self, file, variable_name=\"group\"):\n data = scipy.io.loadmat(file)\n self.logger.info(\"loading mat file %s\", file)\n label = data[variable_name].todense().astype(np.int)\n label = np.array(label)\n print(label.shape, type(label), label.min(), label.max())\n return label", "def load_labels(path):\n with open(path, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r\"[:\\s]+\", content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n # print(labels)\n return labels", "def create_readable_names_for_imagenet_labels():\n\n base_url = 'http://cnbj1-fds.api.xiaomi.net/ml-datasets/imagenet/' # noqa\n synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url)\n synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url)\n\n filename, _ = urllib.urlretrieve(synset_url)\n synset_list = [s.strip() for s in open(filename).readlines()]\n num_synsets_in_ilsvrc = len(synset_list)\n assert num_synsets_in_ilsvrc == 1000\n\n filename, _ = urllib.urlretrieve(synset_to_human_url)\n synset_to_human_list = open(filename).readlines()\n num_synsets_in_all_imagenet = len(synset_to_human_list)\n assert num_synsets_in_all_imagenet == 21842\n\n synset_to_human = {}\n for s in synset_to_human_list:\n parts = s.strip().split('\\t')\n assert len(parts) == 2\n synset = parts[0]\n human = parts[1]\n synset_to_human[synset] = human\n\n label_index = 1\n labels_to_names = {0: 'background'}\n for synset in synset_list:\n name = synset_to_human[synset]\n labels_to_names[label_index] = name\n label_index += 1\n\n return labels_to_names", "def labels_for_training_data():\n current_id = 0\n label_ids = dict()\n faces, faces_ids = list(), list()\n\n # Go through directories and find label and path to image\n for root, dirs, files in walk('data/'):\n for file in files:\n if file.endswith('.jpg') or file.endswith('.png'):\n img_path = path.join(root, file)\n label = path.basename(root).replace(' ', '-').lower()\n if label not in label_ids:\n label_ids[label] = current_id\n current_id += 1\n id_ = label_ids[label]\n\n test_img = cv2.imread(img_path)\n test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\n if test_img is None:\n print('Image not loaded properly')\n continue\n\n faces.append(test_img)\n faces_ids.append(id_)\n\n # Make directory with labels doesn't exist make directory and file with labels\n if not path.exists('labels/'):\n makedirs('labels/')\n with open('labels/face-labels.pickle', 'wb') as file:\n pickle.dump(label_ids, file)\n\n return faces, faces_ids", "def load_label(self, idx):\n im = open('{}/GTTXT/{}.txt'.format(root_dir, idx))\n\t#print(type(im.readlines()[0].rstrip(\"\\n\")))\n rgb_label = [i.rstrip(\"\\n\").split(\" \") for i in im.readlines()]\n\tlabel=[]\t\n\tfor i in rgb_label:\n\t\tlabel+=[int(j) for j in i]\n\tlabel=np.array(label).reshape(720,960)\n\tlabel[label==-1]=12\n\t#print(np.unique(label))\n #label = label[np.newaxis, ...]\n return label", "def ExtractLabel(ImgName):\n # Each img has name notation \"*****a0X*\" where X is PlasticType\n PlasticType = ImgName[7] \n return {\n '1': 0, # PET\n '2': 1, # HDPE\n '4': 2, # LDPE\n '5': 3, # PP\n '6': 4, # PS\n '7': 5, # Other\n }[PlasticType]", "def read_image_with_label(dir, file):\n assert type(file) == str, 
\"File name is not string.\"\n f = os.path.join(dir, file)\n info = file.split(\"_\")\n try:\n label = [int(info[x]) for x in range(1, 3)]\n except:\n print(\"The format of file name is not correct.\")\n else:\n return Image.open(f), label", "def get_pet_labels(image_dir):\n # Create dictionary\n petlabels_dic = {}\n\n # Retrieve the filenames from folder pet_images/\n # Try to catch exceptions (folder does not exists, etc..)\n try:\n filename_list = listdir(image_dir)\n except:\n print('** Error: unable to list files in \"{}\" folder.'.format(image_dir))\n exit()\n else:\n for idx in range(0,len(filename_list)):\n #if filename_list[idx] not in petlabels_dic: # required? probably not\n # Remove extension from filename\n filename = filename_list[idx].split('.')[0]\n # Create a list of words from filename, removing digits\n filename_labels = list(filter(lambda label: label.isalpha(), filename.split('_')))\n # Create key->value item in dictonary\n petlabels_dic[filename_list[idx]] = [\" \".join(filename_labels).lower()]\n\n # Return dictionary\n return petlabels_dic", "def _read_color_labels(filename):\n line_parser = lambda line: (int(line.split(',')[0]), line.split(',')[-1])\n with open(filename, 'r') as labels:\n label_map = dict([line_parser(line.strip()) for line in labels])\n return label_map", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(10000)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def extract_labels(filename, num_images):\n\n# this function definition has been taken from internet \n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(1 * num_images)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) #Interpret a buffer as a 1-dimensional array\n return labels", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def read_images(self, img_name, label_name):\n image_string = tf.read_file(img_name)\n image_decoded = tf.image.decode_jpeg(image_string, channels=3)\n label_string = tf.read_file(label_name)\n label_decoded = tf.image.decode_jpeg(label_string, channels=1)\n return image_decoded, label_decoded", "def load_labels(filename):\n\n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read()\n\n magic, n_labels = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(2))\n\n assert magic[0] == 2049, \"bad magic number, what do?\"\n\n label_stream = array.array('B', b[8:])\n \n assert len(label_stream) == n_labels[0], \"mismatch in label length\"\n \n # label_stream is actually type array.array, which is iterable surely.\n # i'll convert it anyway...\n return tuple(label_stream)", "def _extract_labels(self, filename, one_hot=False):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = self._read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = self._read32(bytestream)\n buf = bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n if one_hot:\n return 
self._dense_to_one_hot(labels)\n return labels", "def unpack_labels(self, labels,\n is_box = False):\n unpacked_labels = {}\n count = 0\n for level in range(self.min_level, self.max_level + 1):\n feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)\n feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)\n steps = feat_size_y * feat_size_x * self.anchors_per_location\n if is_box:\n unpacked_labels[level] = tf.reshape(labels[count:count + steps],\n [-1, 4])\n else:\n unpacked_labels[level] = tf.reshape(labels[count:count + steps],\n [feat_size_y, feat_size_x, -1])\n count += steps\n return unpacked_labels", "def read_label_from_xml(label_path):\n labels = parseXML(label_path)\n label_dic = {}\n for label in labels:\n first_frame = label.firstFrame\n nframes = label.nFrames\n size = label.size\n obj_type = label.objectType\n for index, place, rotate in zip(range(first_frame, first_frame+nframes), label.trans, label.rots):\n if index in label_dic.keys():\n label_dic[index][\"place\"] = np.vstack((label_dic[index][\"place\"], place))\n label_dic[index][\"size\"] = np.vstack((label_dic[index][\"size\"], np.array(size)))\n label_dic[index][\"rotate\"] = np.vstack((label_dic[index][\"rotate\"], rotate))\n else:\n label_dic[index] = {}\n label_dic[index][\"place\"] = place\n label_dic[index][\"rotate\"] = rotate\n label_dic[index][\"size\"] = np.array(size)\n return label_dic, size", "def get_labeled_data(imagefile, labelfile):\n # Open the images with gzip in read binary mode\n images = open(imagefile, 'rb')\n labels = open(labelfile, 'rb')\n\n # Read the binary data\n # We have to get big endian unsigned int. So we need '>I'\n\n # Get metadata for images\n images.read(4) # skip the magic_number\n number_of_images = images.read(4)\n number_of_images = unpack('>I', number_of_images)[0]\n rows = images.read(4)\n rows = unpack('>I', rows)[0]\n cols = images.read(4)\n cols = unpack('>I', cols)[0]\n\n # Get metadata for labels\n labels.read(4) # skip the magic_number\n N = labels.read(4)\n N = unpack('>I', N)[0]\n\n if number_of_images != N:\n raise Exception('number of labels did not match the number of images')\n\n # Get the data\n X = np.zeros((N, rows * cols), dtype=np.uint8) # Initialize numpy array\n y = np.zeros(N, dtype=np.uint8) # Initialize numpy array\n for i in range(N):\n for id in range(rows * cols):\n tmp_pixel = images.read(1) # Just a single byte\n tmp_pixel = unpack('>B', tmp_pixel)[0]\n X[i][id] = tmp_pixel\n tmp_label = labels.read(1)\n y[i] = unpack('>B', tmp_label)[0]\n return (X, y)", "def get_labeled_data(imagefile, labelfile):\n # Open the images with gzip in read binary mode\n images = open(imagefile, 'rb')\n labels = open(labelfile, 'rb')\n\n # Read the binary data\n # We have to get big endian unsigned int. 
So we need '>I'\n\n # Get metadata for images\n images.read(4) # skip the magic_number\n number_of_images = images.read(4)\n number_of_images = unpack('>I', number_of_images)[0]\n rows = images.read(4)\n rows = unpack('>I', rows)[0]\n cols = images.read(4)\n cols = unpack('>I', cols)[0]\n\n # Get metadata for labels\n labels.read(4) # skip the magic_number\n N = labels.read(4)\n N = unpack('>I', N)[0]\n\n if number_of_images != N:\n raise Exception('number of labels did not match the number of images')\n\n # Get the data\n X = np.zeros((N, rows * cols), dtype=np.uint8) # Initialize numpy array\n y = np.zeros(N, dtype=np.uint8) # Initialize numpy array\n for i in range(N):\n for id in range(rows * cols):\n tmp_pixel = images.read(1) # Just a single byte\n tmp_pixel = unpack('>B', tmp_pixel)[0]\n X[i][id] = tmp_pixel\n tmp_label = labels.read(1)\n y[i] = unpack('>B', tmp_label)[0]\n return (X, y)", "def load_labels(label_path):\r\n\r\n with open(label_path, \"r\") as f:\r\n\r\n lines = f.readlines()\r\n \r\n label = {}\r\n index = []\r\n for i, line in enumerate(lines):\r\n sp = line.split()\r\n label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]\r\n index.append([int(sp[3]),int(sp[2]),int(sp[1])])\r\n\r\n return label, index", "def read_labeled_image_list(image_list_file):\n f = open(image_list_file, 'r')\n filenames = []\n labels = []\n for line in f:\n filename, label = line[:-1].split(' ')\n filenames.append(filename)\n labels.append(int(label))\n return filenames, labels", "def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels", "def image_classes():\n\n image_data_path = PROJECT_ROOT + \"/data/CUB_200_2011/\"\n\n # <class_id> <class_name>\n classes = open(image_data_path + \"classes.txt\").readlines()\n classes = [i.strip().split() for i in classes]\n\n # <image_id> <class_id>\n labels = open(image_data_path + \"image_class_labels.txt\").readlines()\n labels = [i.strip().split() for i in labels]\n\n class_ids = {}\n for i in classes:\n class_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels:\n label_ids[int(i[0])] = int(i[1])\n\n return class_ids, label_ids", "def read_label_data(mode, image_type):\n return np.loadtxt(parse_path(mode, image_type, True), dtype=int, delimiter='\\n')", "def get_label(img_path):\n img_name = img_path.stem\n label_name = img_name + \".txt\"\n label_path = img_path.parent / label_name\n with open(label_path) as f:\n label = json.load(f)\n return label", "def getList(self):\n labelMap = {}\n imageMap = {}\n key = []\n index = 0\n\n for root, dirs, files in os.walk(self.path_data):\n for file in files:\n # If .png or .jpg file found then\n if file.endswith(tuple(config.imageFormat)):\n key.append(index)\n labelMap[index] = preprocessing.getLabel(file)\n imageMap[index] = os.path.join(root, file)\n\n index += 1\n\n else:\n continue\n\n return key, imageMap, labelMap", "def extract_labels(filename, num_images, starting_id, context_factor):\n gt_imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if 
os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n # it means that we base our labels only on the core of the patch, not including the contet added\n context_factor = 0\n gt_patches = [img_crop_context(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor) for i in range(num_images)]\n data = np.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = np.asarray([value_to_class(np.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(np.float32)", "def __read_img_file(filename, label):\n image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)\n height, width, _ = image.shape\n image = cv2.resize(image, (img_size, img_size))\n # A label is consist of [y1, x1, y2, x2, class_idx]\n label = np.reshape(label, (-1, 5))\n rel_bboxes = label[..., 0:4] / np.array([height, width, height, width], np.float32)\n label = np.concatenate([rel_bboxes, np.expand_dims(label[..., -1], 1)], axis=-1)\n return image, label", "def load_pixel_labels(pixel_labels_dir, photo_id):\n\n pixel_labels_path = os.path.join(pixel_labels_dir, '%s.npy' % photo_id)\n if not os.path.exists(pixel_labels_path):\n raise ValueError('Could not find ground truth labels at \"%s\"' % pixel_labels_path)\n\n return np.load(pixel_labels_path)", "def load_pixel_labels(pixel_labels_dir, photo_id):\n\n pixel_labels_path = os.path.join(pixel_labels_dir, '%s.npy' % photo_id)\n if not os.path.exists(pixel_labels_path):\n raise ValueError('Could not find ground truth labels at \"%s\"' % pixel_labels_path)\n\n return np.load(pixel_labels_path)", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n return labels", "def read_labeled_image_list(image_list_file):\n\tf = open(image_list_file, 'r')\n\tfilenames = []\n\tlabels = []\n\tfor line in f:\n\t\tline = line.rstrip('\\n')\n\n\t\tfilename, _, label = line.partition(LABEL_SEP)#line[:-1].split(LABEL_SEP)\n\t\tfilenames.append(filename)\n\t\tlabels.append(int(label))\n\t\t#print (filename+LABEL_SEP+\":) \"+label)\n\treturn filenames, labels", "def __init__(self, path, type = 'mrk') :\n stim = np.loadtxt(path, skiprows = 1, usecols = (0,1), dtype = np.dtype(int))\n labels = np.loadtxt(path, skiprows = 1, usecols = 2, dtype = np.dtype(str))\n\n self.dic = dict.fromkeys(labels)\n for key, _ in self.dic.items() : self.dic[key] = []\n for k in range(len(stim)) :\n self.dic[labels[k]].append(stim[k, :])\n return None", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n #print('Labels:')\n\n #for label in labels:\n # print(label.description)\n return labels", "def extract_labels(nlabels,filename, one_hot=False):\n print('Extracting', filename,'bbbccicicicicib')\n\n labels=numpy.loadtxt(filename,dtype='int64')\n \n if one_hot:\n print(\"LABELS ONE HOT\")\n print(labels.shape)\n 
XXX=dense_to_one_hot(labels,nlabels)\n print(XXX.shape)\n return dense_to_one_hot(labels,nlabels)\n print(\"LABELS\")\n print(labels.shape)\n return labels", "def read_rich_labels(path):\n\tlocation_dict = {}\n\twith open(os.path.join(path,'rich_labels.txt')) as f:\n\t\tcontent = f.readlines()\n\tfor line in content:\n\t\tlinecontent = line.split()\n\n\t\t# make sure each line is structured as follows:<image name> <latitude> <longitude>\n\t\tassert len(linecontent) >= 3, \"Unexpectedly short line in rich_labels.txt: \" + line\n\t\tif len(linecontent) > 3: \n\t\t\twarnings.warn('Unexpected line in rich_labels.txt: ' + line + \n\t\t\t \t\t\t '\\n Using first three words: ' + str(linecontent), stacklevel=0)\n\t\ttry:\n\t\t\tlocation_dict[linecontent[0]] = (float(linecontent[1]),float(linecontent[2]))\n\n\t\t\t# make sure you have latitude and longitude coordinates are not flipped\n\t\t\t# assuming that images are from North America\n\t\t\tassert float(linecontent[1]) <= float(linecontent[2])\n\n\t\texcept ValueError as e:\n\t\t\twarnings.warn(\"Unexpected lat/long in rich_labels.txt: \" + \n\t\t\t\t\t\t str(linecontent[1:3]), stacklevel=0)\n\treturn location_dict", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def file_reader(image_file, label_file):\n\n image = im.imread(image_file)\n\n with open(label_file, \"r\") as file:\n label = float(file.read())\n\n return image, label", "def _pickle_load(filename):\n with open(filename, 'rb') as f:\n save = pickle.load(f)\n image = save['image'].astype(np.float32)\n label = np.float32(save['label'])\n label = reformat_labels(label)\n return image, label", "def load_labels(path, encoding='utf-8'):\r\n with open(path, 'r', encoding=encoding) as f:\r\n lines = f.readlines()\r\n if not lines:\r\n return {}\r\n\r\n if lines[0].split(' ', maxsplit=1)[0].isdigit():\r\n pairs = [line.split(' ', maxsplit=1) for line in lines]\r\n return {int(index): label.strip() for index, label in pairs}\r\n else:\r\n return {index: line.strip() for index, line in enumerate(lines)}", "def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]", "def load_label(path: str) -> dict:\n if not os.path.exists(path):\n print(f\"Warning, try to load non-exist label {path}\")\n return None\n return np.load(path, allow_pickle=True).tolist()", "def read_label_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n ret = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if 
len(pair) == 2 and pair[0].strip().isdigit():\n ret[int(pair[0])] = pair[1].strip()\n else:\n ret[row_number] = pair[0].strip()\n return ret", "def extract_labels(filename,tag,one_hot):\n print('Extracting labels',filename)\n return extractdb_labels(filename,tag,one_hot=one_hot)", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n response = client.label_detection(image=image)\n labels = response.label_annotations\n print('Labels:')\n return response", "def _read_labels(test_data=False):\n if not test_data:\n filename = os.path.join(FOLDER_PATH, 'train-labels.idx1-ubyte')\n else:\n filename = os.path.join(FOLDER_PATH, 't10k-labels.idx1-ubyte')\n if not os.path.exists(filename):\n raise ValueError('The file dose not exist.')\n \n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer([filename])\n \n # The first 8 bytes contain file information:\n # [offset] [type] [value] [description]\n # 0000 32 bit integer 0x00000801(2049) magic number\n # 0004 32 bit integer 60000/10000 number of items \n # ...(label value)\n header_bytes = 8\n # Every record consists of a label, with a fixed number of bytes for each.\n record_bytes = 1\n \n # Create a FixedLengthRecordReader to read record.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes,\n header_bytes=header_bytes)\n _, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8, then cast to int32.\n record = tf.cast(tf.decode_raw(value, tf.uint8), tf.int32)\n \n # Reshape from [1] to a scalar shape [].\n label = tf.reshape(record, [])\n\n return label", "def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels", "def get_labels(label_file):\n labels = None\n with open(label_file, 'r') as infile:\n reader = csv.reader(infile)\n labels = dict((rows[0], rows[1]) for rows in reader)\n return labels", "def get_img_labels(task, nb_img=None):\n # Read the csv file matching the ids of the images with the classes\n labels = OrderedDict()\n\n with open('data/' + ('id_train' if task == 'training' else 'sample_submission4') + '.csv', 'rb') as csvfile:\n rows = reader(csvfile, delimiter=',')\n rows.next() # Skip the header\n for row in rows:\n if nb_img is not None and len(labels) >= nb_img:\n break\n labels[row[0]] = int(row[1]) # Integer conversion of the labels\n\n return labels", "def get_label_vectors():\n print(\"Retrieving label vectors...\")\n label_dict = {} # instantiate dict for labels:vectors\n categories = sorted([c for c in os.listdir('images/') if c[0] != '.']) # ignore hidden files\n x = np.zeros(len(categories)) # zero vector of number of categories\n for i, c in enumerate(categories): # get index and category for images\n y = x.copy() # use copy of x\n y[i] = 1 # set label index to true\n label_dict[c] = y.copy() # create label:vector\n\n return label_dict", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)\n buf = 
bytestream.read(num_items)\n labels = np.frombuffer(buf, dtype=np.uint8)\n return dense_to_one_hot(labels)", "def _load_images_and_labels(image_dir):\n\n print('Extracting images from: ', image_dir)\n\n image_paths = _load_image_paths(image_dir)\n images = _extract_images(image_paths)\n num_images = len(image_paths)\n labels = np.ones(num_images, dtype=np.int64)\n\n return images, labels", "def load_leaf():\n # List of image file names\n dataset_directory = os.path.join(root_directory,'Leaf_2')\n filenames = os.listdir(dataset_directory)\n filenames.sort()\n\n # List of bitmap Shapes; each row is a Image of the dataset\n data = []\n\n # Numpy array of labels associated to each class of image\n target = np.empty([len(filenames), ])\n\n previous_label = ''\n class_num = -1\n index = 0\n\n for index, filename in enumerate(filenames):\n data.append(Bitmap(io.imread(os.path.join(dataset_directory, filename))))\n file_label = filename.split('-')[0]\n\n if(previous_label != file_label):\n previous_label = file_label\n class_num += 1\n target[index] = class_num\n else:\n target[index] = class_num\n\n return {'bitmaps': data, 'targets': target}", "def _parse_raw_labels(self, lines):\r\n images = []\r\n labels = []\r\n idx = 0\r\n while idx < len(lines):\r\n image_path = lines[idx].strip()\r\n images.append(self._real_image_path(image_path))\r\n idx += 1\r\n\r\n num = int(lines[idx])\r\n idx += 1\r\n\r\n labels_ = []\r\n for _ in range(num):\r\n x1, y1, w, h, blur, expression, illumination, invalid, \\\r\n occlusion, pose = [int(v) \r\n for v in lines[idx].strip().split()]\r\n x2, y2 = x1 + w - 1, y1 + h - 1 # -1 to get the read x2, y2\r\n\r\n labels_.append([x1, y1, x2, y2])\r\n idx += 1\r\n \r\n labels.append(np.array(labels_))\r\n\r\n self._data_map[self._real_image_path(image_path)] = np.array(labels_)\r\n return np.array(images), np.array(labels)", "def extract_labels(filename, one_hot=False):\n\tprint('Extracting', filename)\n\twith gzip.open(filename) as bytestream:\n\t\tmagic = _read32(bytestream)\n\t\tif magic != 2049:\n\t\t\traise ValueError('Invalid magic number %d in MNIST label file: %s' %(magic, filename))\n\t\tnum_items = _read32(bytestream)\n\t\tbuf = bytestream.read(num_items)\n\t\tlabels = numpy.frombuffer(buf, dtype=numpy.uint8)\n\t\tif one_hot:\n\t\t\treturn dense_to_one_hot(labels)\n\t\treturn labels", "def label_mapping(filename):\n\n\t\n\n\n\twith open(filename, 'r') as infile:\n\t\treader = csv.reader(infile)\n\t\tnext(reader, None) # ignore first line since they're column labels\n\n\t\t#filename, artist, title, style, genre, date\n\t\tfor line in reader:\n\t\t\timg = line[0]\n\t\t\tartist = line[1]\n\t\t\tstyle = line[3]\n\t\t\tgenre = line[4]\n\t\t\tdate = re.findall(r'\\d+', line[5]) #parse any unwanted stuff\n\n\t\t\t#img and artist fields always present, no need to check\n\t\t\tartist_labels[img] = artist\n\n\n\t\t\tif style != '' and style in style_check:\n\t\t\t\t#if sum(x == style for x in style_labels.values()) < max_examples: # avoid imbalance\n\t\t\t\tstyle_labels[img] = style\n\n\n\t\t\tif genre != '' and genre in genre_check:\n\t\t\t\t#if sum(x == genre for x in genre_labels.values()) < max_examples:\n\t\t\t\tgenre_labels[img] = genre\n\n\n\t\t\tif len(date) > 0:\n\t\t\t\tbucket_len = 10 #buckets of 10 years\n\t\t\t\tbucket = (int(date[0]) // bucket_len) * bucket_len \n\t\t\t\tperiod = str(bucket) + '-' + str(bucket + (bucket_len - 1))\n\n\t\t\t\tif period in date_check:\n\t\t\t\t\t#if sum(x == period for x in date_labels.values()) <= 
max_examples:\n\t\t\t\t\tdate_labels[img] = period #parsed_date", "def mapping_image_to_label (self, labels_df, polygons, fpath_tiff): \n \n unread_tiff = rasterio.open(fpath_tiff)\n\n #Projecting the coordinates to that CRS \n proj = Proj(init='epsg:32618')\n data = []\n labels = []\n failed = []\n \n src = rasterio.open(fpath_tiff, 'r')\n outfolder = '/train/batch'\n \n print (\"Hold on tight! Mapping each image to its respective label...\")\n \n \n for num, row in labels_df.iterrows():\n try:\n \n \n roof_material_num = 0\n polygon0 = polygons [num]\n polygon0['coordinates'] = self.transforming_coordinates(polygon0['coordinates'], proj)\n masked_image, out_transform = rasterio.mask.mask(src,[polygon0], filled = True, crop=True, nodata = 0)\n img_image = reshape_as_image (masked_image)\n \n #Defining the name of the image file as \"buildingID+roofMaterial+png\" and its path \n img_path = os.path.join (outfolder, str (row['id'])+'-'+ str (row['roof_material'])+'.png')\n \n #swapping the color channels from RGB2BGR\n img_image = cv2.cvtColor (img_image, cv2.COLOR_RGB2BGR) #img_image is a numpy array\n \n #resizing the image dimensions to 128x128 to match ImageNet dimensions\n img_image = cv2.resize(img_image, (128, 128))\n \n #writing the image in the file\n #cv2.imwrite (img_path, img_image)\n # update the data and labels lists, respectively\n data.append(img_image) #data is a list\n labels.append(row['roof_material'])\n \n except Exception as e:\n print (e)\n failed.append (num)\n \n \n #print number of images we failed to crop and write \n print (\"Bad News First: Failed to write\", len(failed), \"Images.\")\n print (\"Good News: Successfully mapped\", len (data), \"Images.\")\n data = np.array(data)\n labels = np.array(labels)\n #batch = data.sample(frac=0.5, replace=False, random_state=1)\n #print(\"Size and shape of validY: {}\\n\".format(batch.shape))\n return data, labels", "def load_letter(folder,label,image_size=28,sample_num=-1):\n\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size),\n dtype=image_data_type)\n num_images = 0\n if sample_num == -1:\n sample_num = len(image_files)\n for image in image_files:\n image_file = os.path.join(folder, image)\n try:\n image_data = ndimage.imread(image_file).astype(image_data_type)\n if image_data.shape != (image_size, image_size):\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n dataset[num_images, :, :] = image_data\n num_images = num_images + 1\n if num_images >= sample_num:\n break\n except IOError as e:\n print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n\n dataset = dataset[0:num_images, :, :]\n data_label = np.ndarray(shape=(num_images), dtype=np.int8)\n data_label.fill(label)\n return dataset,data_label", "def extract_labels(filename, one_hot=False):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n magic = _read32(bytestream)\n if magic != 2049:\n raise ValueError(\n 'Invalid magic number %d in MNIST label file: %s' %\n (magic, filename))\n num_items = _read32(bytestream)[0]\n #print('check', magic, num_items)\n buf = bytestream.read(num_items)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8)\n if one_hot:\n return dense_to_one_hot(labels)\n return labels", "def load_tiny_imagenet(directory):\n path_train, path_val, path_test = directory + '/train', directory + '/val', directory + '/test'\n labels = os.listdir(path_train)\n train_data = []\n train_labels = []\n for label in labels:\n imgs_path = 
os.path.join(path_train, label, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_data = []\n test_labels = []\n with open(path_val+'/val_annotations.txt', 'r') as f:\n val_annotations = [line.strip().split('\\t') for line in f]\n val_annotations = np.array(val_annotations)\n imgs_path = os.path.join(path_val, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n label = val_annotations[val_annotations[:, 0] == img_name, 1].astype('U9')\n test_labels.append(label)\n test_data = np.concatenate(test_data)\n test_labels = np.concatenate(test_labels)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, label, labels, imgs_path, img_name, img, imgs, val_annotations\n \n return train_data, train_labels, test_data, test_labels", "def load_matrix(self, src_dir, key_word=\"funneled\"):\r\n X = []\r\n Y = []\r\n label = 0\r\n for root, dirs, files in os.walk(src_dir):\r\n if files != []:\r\n for file in files:\r\n if key_word in file:\r\n img = cv2.imread(os.path.join(root, file), cv2.IMREAD_GRAYSCALE)\r\n min_value = np.min(img)\r\n max_value = np.max(img)\r\n X.append((img.flatten() - min_value)/(max_value - min_value)) # Normalize the data to [0, 1]\r\n Y.append(label)\r\n label +=1\r\n \r\n return dict(X = np.asarray(X), \r\n Y = np.asarray(Y))", "def createDictionaryFromFile(inputfile):\n logger.info('loading file: %s' % inputfile)\n dic = {}\n with open(inputfile) as fin:\n for n, line in enumerate(fin, start=1):\n arr = line.strip().split()\n path = arr[0]\n\n labels = []\n for label in arr[1:]:\n labels.append(ast.literal_eval(label))\n\n cpath = path.split('/')\n id_img = int(cpath[-1].replace('.jpg', ''))\n size_img = cpath[-2]\n activity = cpath[-3]\n id_data = int((cpath[-4])[-1])\n home = '/'.join(cpath[:-4])\n\n if dic.has_key(id_data):\n if dic[id_data].has_key(activity):\n if dic[id_data][activity].has_key(size_img):\n dic[id_data][activity][size_img][id_img] = labels\n else:\n dic[id_data][activity][size_img] = {id_img: labels}\n else:\n dic[id_data][activity] = {size_img: {id_img: labels}}\n else:\n dic[id_data] = {activity: {size_img: {id_img: labels}}}\n return n, home, dic", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def hume_matfile_loader(matfile_path):\n mat_struct = loadmat(matfile_path)\n\n # build a list of keys and values for each entry in the structure\n vals = mat_struct['stageData'][0, 0] # <-- set the array you want to access.\n keys = mat_struct['stageData'][0, 0].dtype.descr\n\n # Assemble the keys and values into variables with the same name as that used in MATLAB\n mat_dict = {}\n for i in range(len(keys)):\n key = keys[i][0]\n if len(vals[key].shape) > 1 and vals[key].shape[0] > vals[key].shape[1]:\n vals[key] = vals[key].T\n if len(vals[key][0]) > 1:\n val = np.squeeze(vals[key][0])\n else:\n val = np.squeeze(vals[key][0][0]) # squeeze is used to covert matlat (1,n) 
arrays into numpy (1,) arrays.\n mat_dict[key] = val\n\n return mat_dict", "def get_output(path, label_file = None):\n img_id = path.split('/')[-1]\n labels = label_file.loc[img_id].values\n return labels", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = []\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n #if len(pair) == 2 and pair[0].strip().isdigit():\n labels.append(np.array([int(pair[0].strip()),pair[1].strip()]))\n #else:\n # labels.append(pair[0].strip())\n return np.array(labels)", "def read_dataset(data_txt_file, image_data_path):\n data = {}\n data['image'] = []\n data['label'] = []\n\n indexFile = open(data_txt_file, 'r')\n for sample in indexFile:\n sample = sample.split(',')\n\n _id = sample[0]\n label = int(sample[1])\n imageData = io.imread(image_data_path+_id+'.jpg')\n\n data['label'].append(label)\n data['image'].append(imageData)\n\n data['image'] = np.array(data['image'])\n data['label'] = np.array(data['label'])\n\n return data", "def read_data(case_dir):\n dict_images = dict()\n list_files = ['MR_512.nii.gz', 'landmarks_512.csv', ]\n # In fact, there is no Mask during inference, so we cannot load it.\n\n for file_name in list_files:\n file_path = case_dir + '/' + file_name\n assert os.path.exists(file_path), case_dir + ' does not exist!'\n\n if file_name.split('.')[-1] == 'csv':\n landmarks = pd.read_csv(file_path)\n dict_images['list_landmarks'] = landmark_extractor(landmarks)\n elif file_name.split('.')[0].split('_')[0] == 'MR':\n dict_images['MR'] = sitk.ReadImage(file_path, sitk.sitkFloat32)\n dict_images['MR'] = sitk.GetArrayFromImage(dict_images['MR'])[np.newaxis, :, :, :]\n elif file_name.split('.')[0].split('_')[0] == 'Mask':\n dict_images['Mask'] = sitk.ReadImage(file_path, sitk.sitkInt16)\n dict_images['Mask'] = sitk.GetArrayFromImage(dict_images['Mask'])[np.newaxis, :, :, :]\n\n return dict_images", "def get_label_dict(self):\n with open(self.labels_file_path, mode='r') as f:\n label_file = f.read()\n\n inverse_label_dict = json.loads(label_file)\n label_dict = {int(value): key for key,\n value in inverse_label_dict.items()}\n return label_dict", "def get_labels(self):\n\n print 'Loading label data from', self.label_file, '...'\n labels = {}\n with open(self.label_file, 'rb') as f:\n f.next() # skip header line\n for line in f:\n index, answer = line.rstrip('\\n').split(',')\n labels[index] = answer\n\n return labels", "def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\n if dataset == \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset == \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = pyarray(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in range(size) if lbl[k] in digits ]\n N = len(ind)\n\n images = zeros((N, rows, cols), dtype=uint8)\n labels = zeros((N, 1), dtype=int8)\n for i in range(len(ind)):\n images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n labels[i] = 
lbl[ind[i]]\n\n return images, labels", "def build_features_dict(image, image_id, filename, image_format=None,\n bboxes=None, masks=None, label_ids=None,\n label_names=None, masks_format=\"png\"):\n\n # Add channel dimension if needed.\n if len(image.shape) == 3:\n pass\n elif len(image.shape) == 2:\n image = np.expand_dims(image, -1)\n else:\n raise Exception(f\"Wrong image shape: {image.shape}\")\n\n # Get image shape.\n image_width, image_height, image_channel = image.shape\n\n # Encode image.\n image_encoded = imaging.encode_image(image, image_format)\n\n # Create te feature dict.\n feature_dict = {}\n\n # Image features\n feature_dict['image_height'] = int64_feature(image_height)\n feature_dict['image_width'] = int64_feature(image_width)\n feature_dict['image_channel'] = int64_feature(image_channel)\n feature_dict['image_filename'] = bytes_feature(filename.encode('utf8'))\n feature_dict['image_id'] = bytes_feature(str(image_id).encode('utf8'))\n feature_dict['image_encoded'] = bytes_feature(image_encoded.numpy())\n feature_dict['image_format'] = bytes_feature(image_format.encode('utf8'))\n\n # Object features\n if bboxes is not None:\n if bboxes.shape[0] > 0:\n bboxes_x = bboxes[:, 0]\n bboxes_y = bboxes[:, 1]\n bboxes_width = bboxes[:, 2]\n bboxes_height = bboxes[:, 3]\n else:\n bboxes_x = []\n bboxes_y = []\n bboxes_width = []\n bboxes_height = []\n\n feature_dict['bboxes_x'] = float_list_feature(bboxes_x)\n feature_dict['bboxes_y'] = float_list_feature(bboxes_y)\n feature_dict['bboxes_width'] = float_list_feature(bboxes_width)\n feature_dict['bboxes_height'] = float_list_feature(bboxes_height)\n\n if label_ids is not None:\n feature_dict['label_ids'] = int64_list_feature(label_ids)\n\n if label_names is not None:\n feature_dict['label_names'] = bytes_list_feature(label_names)\n\n if masks is not None:\n # Encode masks.\n masks_encoded = []\n for mask in masks:\n mask = image = np.expand_dims(mask, -1)\n mask_encoded = imaging.encode_image(mask, masks_format)\n masks_encoded.append(mask_encoded.numpy())\n\n feature_dict['masks_encoded'] = bytes_list_feature(masks_encoded)\n feature_dict['masks_format'] = bytes_feature(masks_format.encode(\"utf8\"))\n\n return feature_dict", "def label_visualize(img_dir):\n img = scipy.misc.imread(img_dir).astype(np.uint8)\n yo = np.nonzero(img == 1)\n visual = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n\n for i in range(0, 34):\n index = np.nonzero(img == i)\n visual[index + (0,)] = labels[i][0]\n visual[index + (1,)] = labels[i][1]\n visual[index + (2,)] = labels[i][2]\n\n scipy.misc.imsave('./' + img_dir.split('/')[-1], visual)", "def get_pet_labels(image_dir):\n results_dic = dict()\n \n# # Retrieves the file names from the folder specified as 'image_dir' \n filenames_list = listdir(image_dir)\n \n# # Processes the filenames to create the pet image labels\n# # Retrieves the filenames from folder pet_images/\n for i in range (0, len(filenames_list), 1):\n# # Skips file if starts with . 

Cornstack Python v1 Filtered

The Cornstack Python v1 Filtered dataset is derived from the nomic-ai/cornstack-python-v1 dataset by limiting queries to a maximum of 17 words and restricting the total number of rows to 423259. This dataset is suitable for Python programming education and question-answering applications.
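
The same filtering can be reproduced with the Hugging Face datasets library. The sketch below is illustrative rather than the exact build script: it assumes the source dataset exposes a train split and a query column (as the examples later in this card suggest), and it counts words by whitespace splitting.

```python
from datasets import load_dataset

# Illustrative reconstruction of the filtering described above.
# Assumptions: the source dataset has a "train" split and a "query" column,
# and "words" means whitespace-separated tokens.
MAX_QUERY_WORDS = 17
MAX_ROWS = 423259

source = load_dataset("nomic-ai/cornstack-python-v1", split="train")

# Keep only rows whose query is at most 17 words long.
filtered = source.filter(lambda row: len(row["query"].split()) <= MAX_QUERY_WORDS)

# Cap the total number of rows.
filtered = filtered.select(range(min(MAX_ROWS, len(filtered))))

print(len(filtered))
```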

Note: If you would like to contribute to this repository, please read CONTRIBUTING.md first.


Table of Contents

  • Features
  • File Structure
  • Metadata
  • Usage
  • Versioning
  • Licence
  • Team
  • Contact
  • Reference
  • Citation

Features

  • Name: Cornstack Python v1 Filtered
  • Primary Purpose: Contains query-document pairs with corresponding Python code implementations, focusing primarily on matrix and vector operations (e.g., matrix-vector multiplication, circulant and Toeplitz matrices), along with associated negative samples for machine learning tasks like code retrieval and similarity modeling.
  • Language: English
  • Format: JSONL
  • License: GPL-3.0

File Structure

.
├── CONTRIBUTING.md
├── .gitattributes
├── .gitignore
├── LICENSE
├── README.md
├── shard-00.jsonl.gz
├── shard-01.jsonl.gz
├── shard-02.jsonl.gz
├── shard-03.jsonl.gz
└── shard-04.jsonl.gz

1 directory, 10 files
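
Individual shards can also be fetched without cloning the whole repository, for example with huggingface_hub. The snippet below is a sketch; the repo_id and filename follow the structure shown above.

```python
from huggingface_hub import hf_hub_download

# Download a single shard from the dataset repository.
# The repo_id and filename follow the file structure listed above.
local_path = hf_hub_download(
    repo_id="bunyaminergen/cornstack-python-v1-filtered",
    filename="shard-00.jsonl.gz",
    repo_type="dataset",
)
print(local_path)
```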

Metadata

Data Dictionary

The dataset contains pairs of queries and documents with associated metadata, negative examples, and scoring information.

CSV

| Column          | Description                                               | Type         |
|-----------------|-----------------------------------------------------------|--------------|
| query           | Textual query or instruction                              | string       |
| document        | Relevant code snippet or textual response                 | string       |
| negatives       | List of non-relevant code snippets                        | list[string] |
| metadata        | JSON object containing additional structured information | JSON object  |
| negative_scores | List of scores corresponding to each negative example     | list[float]  |
| document_score  | Score for the document                                    | float        |
| document_rank   | Rank or category label for the document                   | string       |

Example row (CSV):

| query | document | negatives | negative_scores | document_score | document_rank | metadata |
|-------|----------|-----------|-----------------|----------------|---------------|----------|
| Compute the matrixvector product y = Cu where C is a circulant matrix All matrices are real | def circulant_multiplication(u, a): return real(ifft(fft(a)*fft(u))) | ['def covar(fx,cx): ...', 'def matmul(self, q: np.ndarray): ...'] | [0.7675772, 0.6984068] | 0.69579995 | 2 | {"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}} |
JSON Lines

Each line represents one JSON object with the following structure:

{
  "query": "string, textual query or instruction",
  "document": "string, relevant code snippet or textual response",
  "negatives": ["list of strings, non-relevant code snippets"],
  "negative_scores": ["list of floats, scores for each negative example"],
  "document_score": "float, score for the document",
  "document_rank": "string, rank or category label",
  "metadata": {
      "objective": {
          "self": "list, self-related metadata (often empty)",
          "paired": "list, pairwise metadata (often empty)",
          "triplet": [["query", "document", "negatives"]]
      }
  }
}
Example row (JSONL):
{
  "query":"Compute the matrixvector product y = Cu where C is a circulant matrix All matrices are real",
  "document":"def circulant_multiplication(u, a): return real(ifft(fft(a)*fft(u)))",
  "negatives":[
    "def covar(fx,cx): ...",
    "def __matmul__(self, q: np.ndarray): ..."
  ],
  "negative_scores":[
    0.7675772,
    0.6984068
  ],
  "document_score":0.69579995,
  "document_rank":"2",
  "metadata":{
    "objective":{
      "self":[ ],
      "paired":[ ],
      "triplet":[
        [
          "query",
          "document",
          "negatives"
        ]
      ]
    }
  }
}
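
Because each shard is plain gzip-compressed JSONL, records can also be read without the datasets library. The sketch below assumes a shard has already been downloaded locally (for example with hf_hub_download as shown in the File Structure section); the field names follow the schema above.

```python
import gzip
import json

# Stream records from a locally downloaded shard and inspect the first one.
with gzip.open("shard-00.jsonl.gz", "rt", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        print(record["query"])
        print(record["document"][:80])
        print(len(record["negatives"]), "negatives")
        break  # only the first record
```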

Usage

Hugging Face
from datasets import load_dataset

# 141k:
dataset_141k = load_dataset("bunyaminergen/cornstack-python-v1-filtered", revision="v3", split="train")
print(dataset_141k[0])

# 282k:
dataset_282k = load_dataset("bunyaminergen/cornstack-python-v1-filtered", revision="v5", split="train")
print(dataset_282k[0])

# 423k:
dataset_423k = load_dataset("bunyaminergen/cornstack-python-v1-filtered", revision="v7", split="train")
print(dataset_423k[0])
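
For contrastive or retrieval training, rows are typically consumed as (query, positive, negatives) triplets, which is the objective declared in each row's metadata. The sketch below builds on the dataset_141k object loaded above; max_negatives is an arbitrary illustrative cap, not a property of the dataset.

```python
# Minimal sketch: turn rows into training triplets.
def to_triplets(dataset, max_negatives=4):
    for row in dataset:
        yield {
            "query": row["query"],
            "positive": row["document"],
            "negatives": row["negatives"][:max_negatives],
        }

first = next(to_triplets(dataset_141k))
print(first["query"])
print(first["positive"][:80])
print(len(first["negatives"]), "negatives kept")
```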

Versioning

  • v3: 141k version
  • v5: 282k version
  • v7: 423k version

Licence

This dataset is released under the GPL-3.0 license; see the LICENSE file in the repository for the full text.


Team


Contact


Reference

This dataset is derived from the original dataset nomic-ai/cornstack-python-v1.


Citation

@misc{CornstackPythonv1Filtered,
  author       = {Bunyamin Ergen},
  title        = {CornstackPythonv1Filtered},
  year         = {2025},
  month        = {03},
  url          = {https://huggingface.co/datasets/bunyaminergen/cornstack-python-v1-filtered},
}
