def poissreg(x0, rho, x, y):
    """Proximal operator for Poisson regression.

    Computes the proximal operator of the negative log-likelihood loss
    assuming a Poisson noise distribution.

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step

    rho : float
        Momentum parameter for the proximal step (larger value -> stays closer to x0)

    x : (n, k) array_like
        A design matrix consisting of n examples of k-dimensional features (or input).

    y : (n,) array_like
        A vector containing the responses (output) to the n features given in x.

    Returns
    -------
    theta : array_like
        The parameter vector found after running the proximal update step
    """
    # Number of examples. x.shape is a tuple, so it must be indexed;
    # float(x.shape) would raise a TypeError.
    n = float(x.shape[0])

    # Negative Poisson log-likelihood (up to an additive constant) and its
    # gradient with respect to the weights w.
    f = lambda w: np.mean(np.exp(x.dot(w)) - y * x.dot(w))
    df = lambda w: (x.T.dot(np.exp(x.dot(w))) - x.T.dot(y)) / n

    # minimize via BFGS (bfgs is a project helper that also applies the
    # rho-weighted proximal term around x0)
    return bfgs(x0, rho, f, df)
Similar code snippets
1.
LCAV/pylocus · opt_space.py
def gradF_t(X, Y, S, M_E, E, m0, rho):
    """Gradient of the OptSpace objective with respect to the factors.

    The original snippet opened a docstring with ``'''`` that was never
    closed, which made the whole body an unterminated string literal
    (SyntaxError); this version closes it properly.

    Parameters
    ----------
    X : (n, r) ndarray — left factor
    Y : (m, r) ndarray — right factor
    S : (r, r) ndarray — core matrix linking the factors
    M_E : (n, m) ndarray — matrix of observed entries
    E : (n, m) ndarray — observation mask (elementwise multiplied with residuals)
    m0, rho : regularization parameters forwarded to ``Gp``
        (``Gp`` is a project helper; presumably the gradient of the
        incoherence penalty — confirm against opt_space.py)

    Returns
    -------
    (W, Z) : gradients with respect to X and Y respectively.
    """
    n, r = X.shape
    m, r = Y.shape

    XS = np.dot(X, S)
    YS = np.dot(Y, S.T)
    XSY = np.dot(XS, Y.T)

    # Correction terms averaged over the matrix dimensions.
    Qx = np.dot(np.dot(X.T, ((M_E - XSY) * E)), YS) / n
    Qy = np.dot(np.dot(Y.T, ((M_E - XSY) * E).T), XS) / m

    # Masked-residual gradient plus correction plus regularization gradient.
    W = np.dot((XSY - M_E) * E, YS) + np.dot(X, Qx) + rho * Gp(X, m0, r)
    Z = np.dot(((XSY - M_E) * E).T, XS) + np.dot(Y, Qy) + rho * Gp(Y, m0, r)

    return W, Z
2.
atarashansky/self-assembling-manifold · utilities.py
def generate_correlation_map(x, y):
    """Pearson correlation of each row of ``x`` with each row of ``y``.

    Parameters
    ----------
    x : (N, T) ndarray — N variables observed at T timepoints
    y : (M, T) ndarray — M variables observed at the same T timepoints

    Returns
    -------
    (N, M) ndarray of correlation coefficients, equivalent to
    ``np.corrcoef(x, y)[:N, N:]`` but without forming the full matrix.

    Raises
    ------
    ValueError
        If x and y differ in their number of timepoints (columns).
    """
    mu_x = x.mean(1)
    mu_y = y.mean(1)
    # Number of timepoints: shape is a tuple, so the column count must be
    # indexed out (the un-subscripted `x.shape` in the original would make
    # the comparison and the ddof arithmetic below fail).
    n = x.shape[1]
    if n != y.shape[1]:
        raise ValueError('x and y must ' +
                         'have the same number of timepoints.')
    # ddof = n - 1 makes std() divide by (n - (n-1)) = 1, i.e. it returns
    # sqrt(sum((x - mu)^2)) — exactly the factor that cancels the
    # un-normalized covariance numerator below.
    s_x = x.std(1, ddof=n - 1)
    s_y = y.std(1, ddof=n - 1)
    cov = np.dot(x, y.T) - n * np.dot(mu_x[:, None], mu_y[None, :])
    return cov / np.dot(s_x[:, None], s_y[None, :])
3.
ulf1/oxyba · linreg_ridge_gd.py
def linreg_ridge_gd(y, X, lam, algorithm='L-BFGS-B', debug=False):
    """Ridge Regression with Gradient Optimization methods

    Parameters:
    -----------
    y : ndarray
        target variable with N observations

    X : ndarray
        The <N x C> design matrix with C independent
        variables, features, factors, etc.

    lam : float
        Ridge penalty strength (coefficient on the L2 term).

    algorithm : str
        Optional. The algorithm used in scipy.optimize.minimize,
        with 'L-BFGS-B' (Limited BFGS) as default.

        Eligible algorithms are 'CG', 'BFGS', 'Newton-CG', 'L-BFGS-B',
        'TNC', and 'SLSQP' as these use the supplied gradient function.

        This is an unconstrained optimization problem. Thus, the 'L-BFGS-B',
        'TNC' and 'SLSQP' options do not make use of constraints.

        'TNC' (Truncated Newton) seems to be suited for larger datasets
        and 'L-BFGS-B' (Limited BFGS) if computing power becomes an issue.

    debug : bool
        Optional. If True, return scipy's full optimization result
        instead of just the coefficients.

    Returns:
    --------
    beta : ndarray
        Estimated regression coefficients.

    results : scipy.optimize.optimize.OptimizeResult
        Optional. If debug=True then only scipy's
        optimization result variable is returned.
    """
    import numpy as np
    import scipy.optimize as sopt

    # Penalized sum of squared residuals: ||y - X theta||^2 + lam ||theta||^2
    def objective_pssr(theta, y, X, lam):
        return np.sum((y - np.dot(X, theta))**2) + lam * np.sum(theta**2)

    # Analytic gradient of the objective. The orphan `return` statement in
    # the original belonged to this function definition, which had been lost.
    def gradient_pssr(theta, y, X, lam):
        return -2.0 * np.dot(X.T, (y - np.dot(X, theta))) + 2.0 * lam * theta

    # check eligible algorithm
    if algorithm not in ('CG', 'BFGS', 'Newton-CG',
                         'L-BFGS-B', 'TNC', 'SLSQP'):
        raise Exception('Optimization Algorithm not supported.')

    # set start values: one coefficient per column of X
    # (np.ones((X.shape,)) would fail — X.shape is a tuple, not a length)
    theta0 = np.ones((X.shape[1],))

    # run solver, supplying the analytic gradient via jac= — the eligible
    # algorithms above are exactly those that can exploit it
    results = sopt.minimize(
        objective_pssr,
        theta0,
        jac=gradient_pssr,
        args=(y, X, lam),
        method=algorithm,
        options={'disp': False})

    # debug?
    if debug:
        return results

    # done
    return results.x
4.
ulf1/oxyba · linreg_util.py
def linreg_ssr(y, X, beta):
    """Sum of squared residuals of a linear model.

    Returns ``||y - X beta||^2`` for target vector y, design matrix X,
    and coefficient vector beta.
    """
    import numpy as np
    residuals = y - np.dot(X, beta)
    return np.sum(residuals ** 2)
5.
def convex_init(X, Y, niter=100, reg=0.05, apply_sqrt=False):
    """Initialize an alignment between X and Y via entropic OT.

    Runs a Frank-Wolfe-style loop: at each step an entropically
    regularized transport plan (``ot.sinkhorn``) is computed for the
    current gradient and blended into the running coupling P, then the
    final coupling is turned into an orthogonal map via ``procrustes``.
    Prints the objective value each iteration (as the original did).
    """
    n = X.shape[0]
    if apply_sqrt:
        X = sqrt_eig(X)
        Y = sqrt_eig(Y)

    # Gram matrices, with K_Y rescaled to match K_X in Frobenius norm.
    K_X = np.dot(X, X.T)
    K_Y = np.dot(Y, Y.T)
    K_Y = K_Y * (np.linalg.norm(K_X) / np.linalg.norm(K_Y))
    K2_X = np.dot(K_X, K_X)
    K2_Y = np.dot(K_Y, K_Y)

    # Start from the uniform doubly-stochastic coupling.
    P = np.ones([n, n]) / float(n)
    for step in range(1, niter + 1):
        grad = np.dot(P, K2_X) + np.dot(K2_Y, P) - 2 * np.dot(K_Y, np.dot(P, K_X))
        target = ot.sinkhorn(np.ones(n), np.ones(n), grad, reg, stopThr=1e-3)
        # Standard Frank-Wolfe step size 2/(2+t).
        alpha = 2.0 / float(2.0 + step)
        P = alpha * target + (1.0 - alpha) * P
        obj = np.linalg.norm(np.dot(P, K_X) - np.dot(K_Y, P))
        print(obj)
    return procrustes(np.dot(P, X), Y).T
6.
xingjiepan/cylinder_fitting · fitting.py
def G(w, Xs):
    '''Calculate the G function given a cylinder direction w and a
    list of data points Xs to be fitted.'''
    num_points = len(Xs)
    proj = projection_matrix(w)
    projected = [np.dot(proj, point) for point in Xs]

    A = calc_A(projected)
    A_hat = calc_A_hat(A, skew_matrix(w))

    # Squared norms of the projected points are reused three times below.
    sq_norms = [np.dot(p, p) for p in projected]
    u = sum(sq_norms) / num_points
    weighted = sum(s * p for s, p in zip(sq_norms, projected))
    v = np.dot(A_hat, weighted) / np.trace(np.dot(A_hat, A))

    total = 0
    for s, p in zip(sq_norms, projected):
        total += (s - u - 2 * np.dot(p, v)) ** 2
    return total
7.
def objective(X, Y, R, n=5000):
    """Sinkhorn-based alignment objective on the first n rows of X and Y.

    Rotates the head of X by R, computes an entropically regularized
    transport plan against the head of Y, and returns a scaled residual
    norm between the rotated points and their transported matches.
    """
    head_X = X[:n]
    head_Y = Y[:n]
    rotated = np.dot(head_X, R)
    cost = -np.dot(rotated, head_Y.T)
    P = ot.sinkhorn(np.ones(n), np.ones(n), cost, 0.025, stopThr=1e-3)
    return 1000 * np.linalg.norm(rotated - np.dot(P, head_Y)) / n
8.
robertmartin8/PyPortfolioOpt · cla.py
def compute_w(self, covarF_inv, covarFB, meanF, wB):
    """Solve for the free-asset weights and the gamma multiplier.

    Uses the current lambda (``self.l[-1]``), the inverse covariance of
    the free assets, the free/bound cross-covariance, the free-asset
    means, and the bound weights (``wB`` may be None when no assets are
    bound). Returns a ``(weights, gamma)`` tuple.
    """
    lam = self.l[-1]
    onesF = np.ones(meanF.shape)
    num = np.dot(np.dot(onesF.T, covarF_inv), meanF)
    den = np.dot(np.dot(onesF.T, covarF_inv), onesF)

    # 1) compute gamma (and the bound-weight carry-over term w1)
    if wB is None:
        g = float(-lam * num / den + 1 / den)
        w1 = 0
    else:
        onesB = np.ones(wB.shape)
        bound_total = np.dot(onesB.T, wB)
        w1 = np.dot(np.dot(covarF_inv, covarFB), wB)
        carried = np.dot(onesF.T, w1)
        g = float(-lam * num / den + (1 - bound_total + carried) / den)

    # 2) compute weights
    w2 = np.dot(covarF_inv, onesF)
    w3 = np.dot(covarF_inv, meanF)
    return -w1 + g * w2 + lam * w3, g
9.
pysal/spglm · glm.py
def tr_S(self):
    """Trace of the hat matrix S = X (X'X)^{-1} X'.

    For a full-column-rank design matrix this equals the number of
    columns (the model degrees of freedom).
    """
    design = self.X
    gram_inv = np.linalg.inv(np.dot(design.T, design))
    hat = np.dot(design, np.dot(gram_inv, design.T))
    return np.trace(hat)
10.
LCAV/pylocus · opt_space.py
def F_t(X, Y, S, M_E, E, m0, rho):
    """Compute the distortion (OptSpace objective value).

    Parameters
    ----------
    X : (n, r) ndarray — left factor
    Y : (m, r) ndarray — right factor
    S : (r, r) ndarray — core matrix
    M_E : (n, m) ndarray — observed entries
    E : (n, m) ndarray — observation mask
    m0, rho : regularization parameters forwarded to the project helper ``G``

    Returns the masked squared reconstruction error plus the two
    regularization terms.
    """
    # X is (n, r); the original `r = X.shape` bound the whole shape tuple,
    # which would then have been passed to G as the rank argument.
    n, r = X.shape
    out1 = (((np.dot(np.dot(X, S), Y.T) - M_E) * E)**2).sum() / 2
    out2 = rho * G(Y, m0, r)
    out3 = rho * G(X, m0, r)

    return out1 + out2 + out3