mattjj/pyslds · util.py
import numpy as np

def diag_regression_logprior(regression):
    """Log prior of a DiagonalRegression: a Gaussian prior on each row of A
    (mean J_0^{-1} h_0, covariance J_0^{-1}) and a Gamma(alpha_0, beta_0)
    (shape, rate) prior on each noise precision 1 / sigmasq[d]."""
    from scipy.stats import multivariate_normal, gamma
    A = regression.A
    sigmasq = regression.sigmasq_flat
    J, h, alpha, beta = \
        regression.J_0, regression.h_0, regression.alpha_0, regression.beta_0
    Sigma = np.linalg.inv(J)
    mu = Sigma.dot(h)

    lp = 0
    for d in range(regression.D_out):
        lp += multivariate_normal(mu, Sigma).logpdf(A[d])
        lp += gamma(alpha, scale=1./beta).logpdf(1. / sigmasq[d])
    return lp
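
The function above depends on a pyslds/pybasicbayes DiagonalRegression object; as a quick sanity check, the same density can be written against plain arrays. The sketch below is hypothetical (the function name and argument list are made up) and simply mirrors the Gaussian prior on each row of A and the Gamma prior on each precision used above:

import numpy as np
from scipy.stats import multivariate_normal, gamma

# Standalone sketch of the same log prior, taking raw arrays instead of
# a DiagonalRegression object.
def diag_regression_logprior_np(A, sigmasq, J_0, h_0, alpha_0, beta_0):
    Sigma = np.linalg.inv(J_0)
    mu = Sigma.dot(h_0)
    lp = 0.0
    for d in range(A.shape[0]):
        lp += multivariate_normal(mu, Sigma).logpdf(A[d])
        lp += gamma(alpha_0, scale=1. / beta_0).logpdf(1. / sigmasq[d])
    return lp

# Toy check: 2 outputs, 3 inputs, standard-normal prior on the rows of A.
A = np.random.randn(2, 3)
print(diag_regression_logprior_np(A, np.array([0.5, 2.0]),
                                  np.eye(3), np.zeros(3), 2.0, 1.0))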
Similar code snippets
1.
mattjj/pyslds · util.py
Match rating: 63.6%
def regression_logprior(regression):
    if isinstance(regression, DiagonalRegression):
        return diag_regression_logprior(regression)
    elif isinstance(regression, Regression):
        return dense_regression_logprior(regression)
2.
RJT1990/pyflux · var.py
Match rating: 47.42%
def estimator_cov(self,method):
        """ Creates covariance matrix for the estimators

        Parameters
        ----------
        method : str
            Estimation method

        Returns
        ----------
        A Covariance Matrix
        """         
        
        Y = np.array([reg[self.lags:] for reg in self.data])    
        Z = self._create_Z(Y)
        if method == 'OLS':
            sigma = self.ols_covariance()
        else:           
            sigma = self.custom_covariance(self.latent_variables.get_z_values())
        return np.kron(np.linalg.inv(np.dot(Z,np.transpose(Z))), sigma)
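
The returned matrix has the standard Kronecker structure of the VAR least-squares estimator covariance, kron((Z Z')^{-1}, Sigma). A minimal, self-contained illustration, independent of the pyflux class and with made-up dimensions:

import numpy as np

# Hypothetical standalone version of the Kronecker-structured covariance.
def var_estimator_cov(Z, Sigma):
    return np.kron(np.linalg.inv(Z.dot(Z.T)), Sigma)

# Toy example: 2 regressors over 50 time points, 3-dimensional residual covariance.
Z = np.random.randn(2, 50)
Sigma = np.diag([1.0, 0.5, 2.0])
print(var_estimator_cov(Z, Sigma).shape)   # (6, 6)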
3.
laurencium/Causalinference · tools.py
Match rating: 47.35%
def random_data(N=5000, K=3, unobservables=False, **kwargs):

	"""
	Function that generates data according to one of two simple models that
	satisfies the unconfoundedness assumption.

	The covariates and error terms are generated according to
		X ~ N(mu, Sigma), epsilon ~ N(0, Gamma).

	The counterfactual outcomes are generated by
		Y0 = X*beta + epsilon_0,
		Y1 = delta + X*(beta+theta) + epsilon_1.

	Selection is done according to the following propensity score function:
		P(D=1|X) = Lambda(X*beta).

	Here Lambda is the standard logistic CDF.

	Parameters
	----------
	N: int
		Number of units to draw. Defaults to 5000.
	K: int
		Number of covariates. Defaults to 3.
	unobservables: bool
		Returns potential outcomes and true propensity score
		in addition to observed outcome and covariates if True.
		Defaults to False.
	mu, Sigma, Gamma, beta, delta, theta: NumPy ndarrays, optional
		Parameter values appearing in data generating process.

	Returns
	-------
	tuple
		A tuple in the form of (Y, D, X) or (Y, D, X, Y0, Y1, pscore) of
		observed outcomes, treatment indicators, covariate matrix,
		potential outcomes, and true propensity scores.
	"""

	mu = kwargs.get('mu', np.zeros(K))
	beta = kwargs.get('beta', np.ones(K))
	theta = kwargs.get('theta', np.ones(K))
	delta = kwargs.get('delta', 3)
	Sigma = kwargs.get('Sigma', np.identity(K))
	Gamma = kwargs.get('Gamma', np.identity(2))

	X = np.random.multivariate_normal(mean=mu, cov=Sigma, size=N)
	Xbeta = X.dot(beta)
	pscore = logistic.cdf(Xbeta)
	D = np.array([np.random.binomial(1, p, size=1) for p in pscore]).flatten()

	epsilon = np.random.multivariate_normal(mean=np.zeros(2), cov=Gamma, size=N)
	Y0 = Xbeta + epsilon[:,0]
	Y1 = delta + X.dot(beta+theta) + epsilon[:,1]
	Y = (1-D)*Y0 + D*Y1

	if unobservables:
		return Y, D, X, Y0, Y1, pscore
	else:
		return Y, D, X
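
Since every parameter has a default supplied through kwargs, the function can be called with no arguments. A minimal usage sketch, assuming numpy and scipy.stats.logistic are imported at module level as the body implies:

import numpy as np
from scipy.stats import logistic

# Draw a small synthetic dataset and inspect the treated fraction.
Y, D, X = random_data(N=1000, K=3)
print(X.shape, Y.shape, D.mean())   # (1000, 3) (1000,) roughly 0.5

# With unobservables=True the potential outcomes and true propensity
# scores come back as well, which is useful for checking estimator bias.
Y, D, X, Y0, Y1, pscore = random_data(N=1000, unobservables=True)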
4.
rlabbe/filterpy · kalman_filter.py
Match rating: 47.17%
def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.):
    """
    Predict next state (prior) using the Kalman filter state propagation
    equations.

    Parameters
    ----------

    x : numpy.array
        State estimate vector

    P : numpy.array
        Covariance matrix

    F : numpy.array()
        State Transition matrix

    Q : numpy.array, Optional
        Process noise matrix


    u : numpy.array, Optional, default 0.
        Control vector. If non-zero, it is multiplied by B
        to create the control input into the system.

    B : numpy.array, optional, default 1.
        Control transition matrix.

    alpha : float, Optional, default=1.0
        Fading memory setting. 1.0 gives the normal Kalman filter, and
        values slightly larger than 1.0 (such as 1.02) give a fading
        memory effect - previous measurements have less influence on the
        filter's estimates. This formulation of the Fading memory filter
        (there are many) is due to Dan Simon

    Returns
    -------

    x : numpy.array
        Prior state estimate vector

    P : numpy.array
        Prior covariance matrix
    """

    if np.isscalar(F):
        F = np.array(F)
    x = dot(F, x) + dot(B, u)
    P = (alpha * alpha) * dot(dot(F, P), F.T) + Q

    return x, P
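
A quick usage sketch for a one-dimensional constant-velocity model; the matrices below are made up for illustration:

import numpy as np

# State is [position, velocity]; F advances position by one velocity step.
x = np.array([0., 1.])
P = np.eye(2)
F = np.array([[1., 1.],
              [0., 1.]])
Q = 0.01 * np.eye(2)

x, P = predict(x, P, F=F, Q=Q)
print(x)   # [1. 1.] -- prior mean after one prediction step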
5.
ANTsX/ANTsPy · quantile.py
Match rating: 47.06%
def regress_poly(degree, data, remove_mean=True, axis=-1):
    """
    Returns data with degree polynomial regressed out.
    :param bool remove_mean: whether or not to demean the data (i.e. remove the degree-0 term),
    :param int axis: numpy array axes along which regression is performed
    """
    timepoints = data.shape[0]
    # Generate design matrix
    X = np.ones((timepoints, 1))  # quick way to calc degree 0
    for i in range(degree):
        polynomial_func = Legendre.basis(i + 1)
        value_array = np.linspace(-1, 1, timepoints)
        X = np.hstack((X, polynomial_func(value_array)[:, np.newaxis]))
    non_constant_regressors = X[:, :-1] if X.shape[1] > 1 else np.array([])
    betas = np.linalg.pinv(X).dot(data)
    if remove_mean:
        datahat = X.dot(betas)
    else:  # disregard the first layer of X, which is degree 0
        datahat = X[:, 1:].dot(betas[1:, ...])
    regressed_data = data - datahat
    return regressed_data, non_constant_regressors
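
The only non-obvious dependency is Legendre from numpy.polynomial; a short usage sketch that regresses a linear drift out of a synthetic noisy series:

import numpy as np
from numpy.polynomial.legendre import Legendre

# Remove a degree-1 (linear) trend plus the mean from a noisy time series.
t = np.linspace(0, 1, 200)
data = 3.0 * t + 0.1 * np.random.randn(200)

detrended, regressors = regress_poly(1, data)
print(detrended.std())   # close to the 0.1 noise level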
6.
SheffieldML/GPy · expectation_propagation.py
Match rating: 45.83%
def _recompute(LLT0, Kmn, ga_approx):
        LLT = LLT0 + np.dot(Kmn*ga_approx.tau[None,:],Kmn.T)
        L = jitchol(LLT)
        V, _ = dtrtrs(L,Kmn,lower=1)
        #Sigma_diag = np.sum(V*V,-2)
        #Knmv_tilde = np.dot(Kmn,v_tilde)
        #mu = np.dot(V2.T,Knmv_tilde)
        Sigma = np.dot(V.T,V)
        mu = np.dot(Sigma, ga_approx.v)
        Sigma_diag = np.diag(Sigma).copy()
        return posteriorParamsDTC(mu, Sigma_diag), LLT
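
jitchol and dtrtrs are GPy internals (a jittered Cholesky factorization and a triangular solve). A standalone NumPy/SciPy sketch of the same linear algebra, assuming LLT0 is the prior Gram matrix of the inducing inputs (an assumption about the caller, not shown above):

import numpy as np
from scipy.linalg import solve_triangular

# Sigma = Kmn' (LLT0 + Kmn diag(tau) Kmn')^{-1} Kmn,  mu = Sigma v.
def recompute_dtc(LLT0, Kmn, tau, v, jitter=1e-8):
    LLT = LLT0 + (Kmn * tau[None, :]).dot(Kmn.T)
    L = np.linalg.cholesky(LLT + jitter * np.eye(LLT.shape[0]))   # jitchol
    V = solve_triangular(L, Kmn, lower=True)                      # dtrtrs
    Sigma = V.T.dot(V)
    mu = Sigma.dot(v)
    return mu, np.diag(Sigma).copy()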
7.
alimuldal/PyFNND · _fnndeconv.py
Match rating: 45.78%
def _init_theta(F, theta0, offset, dt=0.02, rate=1., tau=1.0, decimate=0):

    if decimate > 0:
        F = F[:, ::decimate]

    npix = F.shape[0]
    sigma, alpha, beta, lamb, gamma = theta0

    if None in (sigma, alpha, beta):
        med_F = np.median(F, axis=1)

    # noise parameter
    if sigma is None:
        # K is the correction factor when using the median absolute deviation
        # as a robust estimator of the standard deviation of a normal
        # distribution <http://en.wikipedia.org/wiki/Median_absolute_deviation>
        K = 1.4826
        mad = np.median(np.abs(F - med_F[:, None]))
        sigma = mad * K                         # scalar

    # amplitude
    if alpha is None:
        alpha = np.atleast_1d(med_F)            # vector

    # baseline
    if beta is None:
        if npix == 1:
            beta = np.atleast_1d(np.percentile(F, 5., axis=1))
        else:
            beta = np.atleast_1d(med_F)
    else:
        # beta should absorb the offset parameter
        beta = np.atleast_1d(beta - offset)

    # firing rate
    if lamb is None:
        lamb = rate                             # scalar

    # decay parameter (fraction of remaining fluorescence after one time step)
    if gamma is None:
        gamma = np.exp(-dt / tau)               # scalar

    return sigma, alpha, beta, lamb, gamma
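
The MAD-based noise estimate is the only subtle step: the factor K = 1.4826 rescales the median absolute deviation to the standard deviation of a normal sample. A standalone check with synthetic data (names here are made up):

import numpy as np

# Verify that K * MAD recovers the true standard deviation of ~2.0.
rng = np.random.default_rng(0)
F = rng.normal(loc=10.0, scale=2.0, size=(1, 5000))   # one "pixel", 5000 frames

med_F = np.median(F, axis=1)
mad = np.median(np.abs(F - med_F[:, None]))
print(1.4826 * mad)   # approximately 2.0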
8.
rlabbe/filterpy · stats.py
Match rating: 44.86%
def log_likelihood(z, x, P, H, R):
    """
    Returns log-likelihood of the measurement z given the Gaussian
    posterior (x, P) using measurement function H and measurement
    covariance error R
    """
    S = np.dot(H, np.dot(P, H.T)) + R
    return logpdf(z, np.dot(H, x), S)
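
Equivalently, with scipy.stats in place of the logpdf helper used above (a sketch, not the library's own code):

import numpy as np
from scipy.stats import multivariate_normal

# Same quantity via scipy: z ~ N(Hx, H P H' + R).
def log_likelihood_scipy(z, x, P, H, R):
    S = H @ P @ H.T + R
    return multivariate_normal(mean=H @ x, cov=S).logpdf(z)

# Toy example: observe position only from a [position, velocity] state.
H = np.array([[1., 0.]])
x = np.array([0., 1.])
print(log_likelihood_scipy(np.array([0.2]), x, np.eye(2), H, np.array([[0.5]])))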
9.
QuantEcon/QuantEcon.py · kalman.py
Match rating: 44.75%
def prior_to_filtered(self, y):
        r"""
        Updates the moments (x_hat, Sigma) of the time t prior to the
        time t filtering distribution, using current measurement :math:`y_t`.

        The updates are according to

        .. math::

            \hat{x}^F = \hat{x} + \Sigma G' (G \Sigma G' + R)^{-1}
                (y - G \hat{x})
            \Sigma^F = \Sigma - \Sigma G' (G \Sigma G' + R)^{-1} G
                \Sigma

        Parameters
        ----------
        y : scalar or array_like(float)
            The current measurement

        """
        # === simplify notation === #
        G, H = self.ss.G, self.ss.H
        R = np.dot(H, H.T)

        # === and then update === #
        y = np.atleast_2d(y)
        y.shape = self.ss.k, 1
        E = dot(self.Sigma, G.T)
        F = dot(dot(G, self.Sigma), G.T) + R
        M = dot(E, inv(F))
        self.x_hat = self.x_hat + dot(M, (y - dot(G, self.x_hat)))
        self.Sigma = self.Sigma - dot(M, dot(G,  self.Sigma))
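
Outside the class, the update in the docstring reduces to a few lines of NumPy. The sketch below uses standalone arrays rather than the self.ss state-space object:

import numpy as np
from numpy.linalg import inv

# x_F     = x + Sigma G' (G Sigma G' + R)^{-1} (y - G x)
# Sigma_F = Sigma - Sigma G' (G Sigma G' + R)^{-1} G Sigma
def prior_to_filtered_np(x_hat, Sigma, G, R, y):
    M = Sigma @ G.T @ inv(G @ Sigma @ G.T + R)
    x_f = x_hat + M @ (y - G @ x_hat)
    Sigma_f = Sigma - M @ (G @ Sigma)
    return x_f, Sigma_f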
10.
ganguli-lab/proxalgs · operators.py
Match rating: 44.73%
def poissreg(x0, rho, x, y):
    """
    Proximal operator for Poisson regression

    Computes the proximal operator of the negative log-likelihood loss assuming a Poisson noise distribution.

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step

    rho : float
        Momentum parameter for the proximal step (larger value -> stays closer to x0)

    x : (n, k) array_like
        A design matrix consisting of n examples of k-dimensional features (or input).

    y : (n,) array_like
        A vector containing the responses (output) to the n features given in x.

    Returns
    -------
    theta : array_like
        The parameter vector found after running the proximal update step
    """

    # objective and gradient
    n = float(x.shape[0])
    f = lambda w: np.mean(np.exp(x.dot(w)) - y * x.dot(w))
    df = lambda w: (x.T.dot(np.exp(x.dot(w))) - x.T.dot(y)) / n

    # minimize via BFGS
    return bfgs(x0, rho, f, df)
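
The bfgs call used above presumably minimizes f(w) + (rho / 2) * ||w - x0||^2, the standard proximal objective. A self-contained sketch with scipy.optimize standing in for that helper (the function name here is made up):

import numpy as np
from scipy.optimize import minimize

# Hypothetical standalone proximal update for Poisson regression:
# minimize the Poisson NLL plus the quadratic proximity term.
def poissreg_prox(x0, rho, X, y):
    def objective(w):
        Xw = X.dot(w)
        f = np.mean(np.exp(Xw) - y * Xw)
        g = (X.T.dot(np.exp(Xw)) - X.T.dot(y)) / X.shape[0]
        return f + 0.5 * rho * np.sum((w - x0) ** 2), g + rho * (w - x0)
    return minimize(objective, x0, jac=True, method='L-BFGS-B').x

# Toy usage: 100 examples, 3 features, Poisson counts.
X = 0.1 * np.random.randn(100, 3)
y = np.random.poisson(1.0, size=100)
print(poissreg_prox(np.zeros(3), rho=1.0, X=X, y=y))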