# Source: tensorflow/probability · semilocal_linear_trend.py (Python)
def semilocal_linear_trend_transition_matrix(autoregressive_coef):
  """Build the transition matrix for a semi-local linear trend model."""
  # We want to write the following 2 x 2 matrix:
  #  [[1., 1., ],    # level(t+1) = level(t) + slope(t)
  #   [0., ar_coef], # slope(t+1) = ar_coef * slope(t)
  # but it's slightly tricky to properly incorporate the batch shape of
  # autoregressive_coef. E.g., if autoregressive_coef has shape [4,6], we want
  # to return shape [4, 6, 2, 2]. We do this by breaking the matrix into its
  # fixed entries, written explicitly, and then the autoregressive_coef part
  # which we add in after using a mask to broadcast to the correct matrix shape.

  # Match the coefficient's dtype so the addition below is well-typed.
  fixed_entries = tf.constant(
      [[1., 1.],
       [0., 0.]],
      dtype=autoregressive_coef.dtype)

  autoregressive_coef_mask = tf.constant([[0., 0.],
                                          [0., 1.]],
                                         dtype=autoregressive_coef.dtype)
  # Two trailing singleton dims let the batched coefficient broadcast against
  # the [2, 2] mask, so only the bottom-right entry picks it up.
  bottom_right_entry = (autoregressive_coef[..., tf.newaxis, tf.newaxis] *
                        autoregressive_coef_mask)
  return tf.linalg.LinearOperatorFullMatrix(
      fixed_entries + bottom_right_entry)
# --- Similar code snippet: tensorflow/tensor2tensor · bayes.py (match rating 44.78%, Python) ---
def fit(self, x=None, y=None):
    """Fit the exact posterior over linear-regression coefficients.

    Computes
      p(coeffs | x, y) = Normal(coeffs |
        mean = (1/noise_variance) (1/noise_variance x^T x + I)^{-1} x^T y,
        covariance = (1/noise_variance x^T x + I)^{-1}).

    Args:
      x: design matrix; the einsum below treats it as shape [N, M].
      y: targets; treated as shape [N].
    """
    # TODO(trandustin): We newly fit the data at each call. Extend to do
    # Bayesian updating.
    kernel_matrix = tf.matmul(x, x, transpose_a=True) / self.noise_variance
    # Precision = (1/noise_variance) x^T x + I, formed by adding 1. to the
    # diagonal of the scaled kernel matrix.
    coeffs_precision = tf.matrix_set_diag(
        kernel_matrix, tf.matrix_diag_part(kernel_matrix) + 1.)
    coeffs_precision_tril = tf.linalg.cholesky(coeffs_precision)
    self.coeffs_precision_tril_op = tf.linalg.LinearOperatorLowerTriangular(
        coeffs_precision_tril)
    # Two triangular solves against the Cholesky factor apply precision^{-1}
    # to x^T y; dividing by noise_variance yields the posterior mean.
    self.coeffs_mean = self.coeffs_precision_tril_op.solvevec(
        self.coeffs_precision_tril_op.solvevec(tf.einsum('nm,n->m', x, y)),
        adjoint=True) / self.noise_variance
    # TODO(trandustin): To be fully Keras-compatible, return History object.
# --- Similar code snippet: sryza/spark-timeseries · RegressionARIMA.py (match rating 42.25%, Python) ---
def fit_cochrane_orcutt(ts, regressors, maxIter=10, sc=None):
    """Fit a linear regression model with AR(1) errors (Cochrane-Orcutt).

    References on the Cochrane-Orcutt model:
    See [[https://onlinecourses.science.psu.edu/stat501/node/357]]
    See: Applied Linear Statistical Models - Fifth Edition - Michael H. Kutner, page 492

    The method assumes the time series to have the following model

        Y_t = B.X_t + e_t
        e_t = rho * e_{t-1} + w_t

    e_t has an autoregressive structure, where w_t is iid ~ N(0, sigma^2).

    Outline of the method:
    1) OLS regression for Y (time series) over regressors (X).
    2) Apply an autocorrelation test (Durbin-Watson test) over the residuals,
       to test whether e_t still has an autoregressive structure.
    3) If the test fails, stop; else update the coefficients (B's) accordingly
       and go back to step 1).

    Parameters
    ----------
    ts:
        Vector of size N for time series data to create the model for, as a Numpy array.
    regressors:
        Matrix N X K for the timed values for K regressors over N time points, as a Numpy array.
    maxIter:
        Maximum number of iterations in the iterative Cochrane-Orcutt estimation.
    sc:
        The SparkContext, required.

    Returns
    -------
    Instance of class [[RegressionARIMAModel]].
    """
    assert sc is not None, "Missing SparkContext"
    jvm = sc._jvm
    # Convert the numpy inputs to Breeze types and delegate the fit to the JVM.
    jmodel = jvm.com.cloudera.sparkts.models.RegressionARIMA.fitCochraneOrcutt(
        _nparray2breezevector(sc, ts), _nparray2breezematrix(sc, regressors), maxIter)
    return RegressionARIMAModel(jmodel=jmodel, sc=sc)
# --- Similar code snippet: frmdstryr/enamlx · qt_plot_area.py (match rating 41.39%, Python) ---
def set_auto_range(self,auto_range):
        # Apply the declaration's auto-range setting to the plot item.
        d = self.declaration
        if not isinstance(auto_range, tuple):
            # Scalar value: expand to an (x, y) pair and write it back to the
            # declaration so both axes carry the same setting.
            auto_range = (auto_range,auto_range)
            self.declaration.auto_range = auto_range
        # NOTE(review): the bodies of the two branches below are missing from
        # this extract (presumably per-axis range handling on the underlying
        # widget) — consult the original qt_plot_area.py source to restore them.
        if not auto_range[0]:
        if not auto_range[1]:
# --- Similar code snippet: log2timeline/dfvfs · manager.py (match rating 41.28%, Python) ---
def RegisterDecompressor(cls, decompressor):
    """Registers a decompressor for a specific compression method.

    Args:
      decompressor (type): decompressor class.

    Raises:
      KeyError: if the corresponding decompressor is already set.
    """
    # Compression methods are keyed case-insensitively.
    compression_method = decompressor.COMPRESSION_METHOD.lower()
    if compression_method in cls._decompressors:
      raise KeyError(
          'Decompressor for compression method: {0:s} already set.'.format(
              compression_method))

    cls._decompressors[compression_method] = decompressor
# --- Similar code snippet: sryza/spark-timeseries · RegressionARIMA.py (match rating 40.91%, Python) ---
def fit_model(ts, regressors, method="cochrane-orcutt", optimizationArgs=None, sc=None):
    """Fit a regression-with-ARIMA-errors model to a time series.

    Parameters
    ----------
    ts:
        Time series to which to fit an ARIMA model, as a Numpy array.
    regressors:
        Regression matrix as a Numpy array.
    method:
        Regression method. Currently, only "cochrane-orcutt" is supported.
    optimizationArgs:
        Extra arguments forwarded (as a Scala seq) to the JVM fitting routine.
    sc:
        The SparkContext, required.

    Returns
    -------
    A RegressionARIMAModel.
    """
    assert sc is not None, "Missing SparkContext"
    jvm = sc._jvm
    # Convert the numpy inputs to Breeze types and delegate the fit to the JVM.
    jmodel = jvm.com.cloudera.sparkts.models.RegressionARIMA.fitModel(
        _nparray2breezevector(sc, ts), _nparray2breezematrix(sc, regressors),
        method, _py2scala_seq(sc, optimizationArgs))
    return RegressionARIMAModel(jmodel=jmodel, sc=sc)
# --- Similar code snippet: david-cortes/costsensitive · __init__.py (match rating 40.9%, Python) ---
def _fit(self, c, X, C):
        # Train the regressor assigned to class `c` on the c-th cost column.
        class_regressor = self.regressors[c]
        class_regressor.fit(X, C[:, c])
# --- Similar code snippet: zarr-developers/zarr · creation.py (match rating 40.83%, Python) ---
def _kwargs_compat(compressor, fill_value, kwargs):
    """Normalize legacy keyword arguments into (compressor, fill_value).

    Translates h5py-style 'compression'/'compression_opts' into `compressor`
    and 'fillvalue' into `fill_value`; warns about and ignores anything else.
    Returns the (compressor, fill_value) pair.
    """

    # to be compatible with h5py, as well as backwards-compatible with Zarr
    # 1.x, accept 'compression' and 'compression_opts' keyword arguments

    if compressor != 'default':
        # 'compressor' overrides 'compression'
        if 'compression' in kwargs:
            warn("'compression' keyword argument overridden by 'compressor'")
            del kwargs['compression']
        if 'compression_opts' in kwargs:
            warn("'compression_opts' keyword argument overridden by 'compressor'")
            del kwargs['compression_opts']

    elif 'compression' in kwargs:
        compression = kwargs.pop('compression')
        compression_opts = kwargs.pop('compression_opts', None)

        if compression is None or compression == 'none':
            compressor = None

        elif compression == 'default':
            compressor = default_compressor

        elif isinstance(compression, str):
            codec_cls = codec_registry[compression]

            # handle compression_opts
            if isinstance(compression_opts, dict):
                compressor = codec_cls(**compression_opts)
            elif isinstance(compression_opts, (list, tuple)):
                compressor = codec_cls(*compression_opts)
            elif compression_opts is None:
                compressor = codec_cls()
            else:
                # assume single argument, e.g., int
                compressor = codec_cls(compression_opts)

        # be lenient here if user gives compressor as 'compression'
        elif hasattr(compression, 'get_config'):
            compressor = compression

        else:
            raise ValueError('bad value for compression: %r' % compression)

    # handle 'fillvalue'
    if 'fillvalue' in kwargs:
        # to be compatible with h5py, accept 'fillvalue' instead of
        # 'fill_value'
        fill_value = kwargs.pop('fillvalue')

    # ignore other keyword arguments
    for k in kwargs:
        warn('ignoring keyword argument %r' % k)

    return compressor, fill_value
# --- Similar code snippet: librosa/librosa · audio.py (match rating 40.33%, Python) ---
def __lpc(y: np.ndarray, order: int) -> np.ndarray:
    """Compute order-`order` LPC (AR) coefficients for `y` via Burg's method.

    `y` is treated as a 1-D real-valued signal. Returns an array of length
    ``order + 1`` whose first element is always 1. Raises FloatingPointError
    if the denominator recursion becomes non-positive (ill-conditioned input).
    """
    # This implementation follows the description of Burg's algorithm given in
    # section III of Marple's paper referenced in the docstring.
    # We use the Levinson-Durbin recursion to compute AR coefficients for each
    # increasing model order by using those from the last. We maintain two
    # arrays and then flip them each time we increase the model order so that
    # we may use all the coefficients from the previous order while we compute
    # those for the new one. These two arrays hold ar_coeffs for order M and
    # order M-1.  (Corresponding to a_{M,k} and a_{M-1,k} in eqn 5)
    ar_coeffs = np.zeros(order+1, dtype=y.dtype)
    ar_coeffs[0] = 1
    ar_coeffs_prev = np.zeros(order+1, dtype=y.dtype)
    ar_coeffs_prev[0] = 1

    # These two arrays hold the forward and backward prediction error. They
    # correspond to f_{M-1,k} and b_{M-1,k} in eqns 10, 11, 13 and 14 of
    # Marple. First they are used to compute the reflection coefficient at
    # order M from M-1 then are re-used as f_{M,k} and b_{M,k} for each
    # iteration of the below loop
    fwd_pred_error = y[1:]
    bwd_pred_error = y[:-1]

    # DEN_{M} from eqn 16 of Marple.
    den = np.dot(fwd_pred_error, fwd_pred_error) \
        + np.dot(bwd_pred_error, bwd_pred_error)

    for i in range(order):
        if den <= 0:
            raise FloatingPointError('numerical error, input ill-conditioned?')

        # Eqn 15 of Marple, with fwd_pred_error and bwd_pred_error
        # corresponding to f_{M-1,k+1} and b{M-1,k} and the result as a_{M,M}
        reflect_coeff = -2 * np.dot(bwd_pred_error, fwd_pred_error) / den

        # Now we use the reflection coefficient and the AR coefficients from
        # the last model order to compute all of the AR coefficients for the
        # current one.  This is the Levinson-Durbin recursion described in
        # eqn 5.
        # Note 1: We don't have to care about complex conjugates as our signals
        # are all real-valued
        # Note 2: j counts 1..order+1, i-j+1 counts order..0
        # Note 3: The first element of ar_coeffs* is always 1, which copies in
        # the reflection coefficient at the end of the new AR coefficient array
        # after the preceding coefficients
        ar_coeffs_prev, ar_coeffs = ar_coeffs, ar_coeffs_prev
        for j in range(1, i+2):
            ar_coeffs[j] = ar_coeffs_prev[j] + reflect_coeff*ar_coeffs_prev[i - j + 1]

        # Update the forward and backward prediction errors corresponding to
        # eqns 13 and 14.  We start with f_{M-1,k+1} and b_{M-1,k} and use them
        # to compute f_{M,k} and b_{M,k}
        fwd_pred_error_tmp = fwd_pred_error
        fwd_pred_error = fwd_pred_error + reflect_coeff*bwd_pred_error
        bwd_pred_error = bwd_pred_error + reflect_coeff*fwd_pred_error_tmp

        # SNIP - we are now done with order M and advance. M-1 <- M

        # Compute DEN_{M} using the recursion from eqn 17.
        # reflect_coeff = a_{M-1,M-1}      (we have advanced M)
        # den =  DEN_{M-1}                 (rhs)
        # bwd_pred_error = b_{M-1,N-M+1}   (we have advanced M)
        # fwd_pred_error = f_{M-1,k}       (we have advanced M)
        # den <- DEN_{M}                   (lhs)
        q = 1 - reflect_coeff**2
        den = q*den - bwd_pred_error[-1]**2 - fwd_pred_error[0]**2

        # Shift up forward error.
        # fwd_pred_error <- f_{M-1,k+1}
        # bwd_pred_error <- b_{M-1,k}
        # N.B. We do this after computing the denominator using eqn 17 but
        # before using it in the numerator in eqn 15.
        fwd_pred_error = fwd_pred_error[1:]
        bwd_pred_error = bwd_pred_error[:-1]

    return ar_coeffs
# --- Similar code snippet: log2timeline/dfvfs · manager.py (match rating 40.25%, Python) ---
def GetDecompressor(cls, compression_method):
    """Retrieves the decompressor object for a specific compression method.

    Args:
      compression_method (str): compression method identifier.

    Returns:
      Decompressor: decompressor or None if the compression method does
          not exists.
    """
    # Lookups are case-insensitive; the registry stores lowercased keys.
    compression_method = compression_method.lower()
    decompressor = cls._decompressors.get(compression_method, None)
    if not decompressor:
      return None

    # Return a fresh instance of the registered decompressor class.
    return decompressor()
# --- Similar code snippet: petebachant/Nortek-Python · controls.py (match rating 39.84%, Python) ---
def start_disk_recording(self, filename, autoname=False):
        """Begin recording measurement data to disk.

        ``filename`` is given without an extension. When ``autoname`` is
        True, a new file is opened for recording each time the specified
        time interval has elapsed, with the current date and time appended
        to the filename automatically.
        """
        recorder = self.pdx
        recorder.StartDiskRecording(filename, autoname)