	""" Model definition functions and weight loading.
"""
from __future__ import print_function, division
from keras.models import Model, Sequential
from keras.layers.merge import concatenate
from keras.layers import Input, Bidirectional, Embedding, Dense, Dropout, SpatialDropout1D, LSTM, Activation
from keras.regularizers import L1L2
from attlayer import AttentionWeightedAverage
from global_variables import NB_TOKENS, NB_EMOJI_CLASSES
import numpy as np
from copy import deepcopy
from os.path import exists
import h5py
def deepmoji_feature_encoding(maxlen, weight_path, return_attention=False):
    """ Loads the pretrained DeepMoji model for extracting features
        from the penultimate feature layer. In this way, it transforms
        the text into its emotional encoding.
    # Arguments:
        maxlen: Maximum length of a sentence (given in tokens).
        weight_path: Path to model weights to be loaded.
        return_attention: If true, the model additionally outputs the
            attention weight of each input token used for the prediction.
    # Returns:
        Pretrained model for encoding text into feature vectors.
    """
    model = deepmoji_architecture(nb_classes=None, nb_tokens=NB_TOKENS,
                                  maxlen=maxlen, feature_output=True,
                                  return_attention=return_attention)
    load_specific_weights(model, weight_path, exclude_names=['softmax'])
    return model
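# A minimal usage sketch (hypothetical weight path; inputs must already be
# tokenized into a (n_samples, maxlen) int array of token ids):
#
#     encoder = deepmoji_feature_encoding(maxlen=30,
#                                         weight_path='deepmoji_weights.hdf5')
#     features = encoder.predict(token_array)
#     # each feature vector has 2304 dimensions: the attention-weighted
#     # average over both BiLSTM outputs (2 * 1024) and the embedding (256)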
def deepmoji_emojis(maxlen, weight_path, return_attention=False):
    """ Loads the pretrained DeepMoji model for extracting features
        from the penultimate feature layer. In this way, it transforms
        the text into its emotional encoding.
    # Arguments:
        maxlen: Maximum length of a sentence (given in tokens).
        weight_path: Path to model weights to be loaded.
        return_attention: If true, output will be weight of each input token
            used for the prediction
    # Returns:
        Pretrained model for encoding text into feature vectors.
    """
    model = deepmoji_architecture(nb_classes=NB_EMOJI_CLASSES,
                                  nb_tokens=NB_TOKENS, maxlen=maxlen,
                                  return_attention=return_attention)
    model.load_weights(weight_path, by_name=False)
    return model
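# A minimal usage sketch (hypothetical weight path); the model outputs one
# softmax probability per emoji class for each tokenized sentence:
#
#     emoji_model = deepmoji_emojis(maxlen=30,
#                                   weight_path='deepmoji_weights.hdf5')
#     probs = emoji_model.predict(token_array)  # (n_samples, NB_EMOJI_CLASSES)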
def deepmoji_transfer(nb_classes, maxlen, weight_path=None, extend_embedding=0,
                      embed_dropout_rate=0.25, final_dropout_rate=0.5,
                      embed_l2=1E-6):
    """ Loads the pretrained DeepMoji model for finetuning/transfer learning.
        Does not load weights for the softmax layer.
        Note that if you are planning to use class average F1 for evaluation,
        nb_classes should be set to 2 instead of the actual number of classes
        in the dataset, since binary classification will be performed on each
        class individually.
        Note that for the 'new' method, weight_path should be left as None.
    # Arguments:
        nb_classes: Number of classes in the dataset.
        maxlen: Maximum length of a sentence (given in tokens).
        weight_path: Path to model weights to be loaded.
        extend_embedding: Number of tokens that have been added to the
            vocabulary on top of NB_TOKENS. If this number is larger than 0,
            the embedding layer's dimensions are adjusted accordingly, with the
            additional weights being set to random values.
        embed_dropout_rate: Dropout rate for the embedding layer.
        final_dropout_rate: Dropout rate for the final Softmax layer.
        embed_l2: L2 regularization for the embedding layer.
    # Returns:
        Model with the given parameters.
    """
    model = deepmoji_architecture(nb_classes=nb_classes,
                                  nb_tokens=NB_TOKENS + extend_embedding,
                                  maxlen=maxlen, embed_dropout_rate=embed_dropout_rate,
                                  final_dropout_rate=final_dropout_rate, embed_l2=embed_l2)
    if weight_path is not None:
        load_specific_weights(model, weight_path,
                              exclude_names=['softmax'],
                              extend_embedding=extend_embedding)
    return model
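# A minimal fine-tuning sketch (hypothetical paths and data; the optimizer and
# loss below are illustrative choices, not prescribed by this module). With
# nb_classes=2 the model ends in a single sigmoid unit, so binary labels and
# binary_crossentropy apply:
#
#     model = deepmoji_transfer(nb_classes=2, maxlen=30,
#                               weight_path='deepmoji_weights.hdf5')
#     model.compile(optimizer='adam', loss='binary_crossentropy',
#                   metrics=['accuracy'])
#     model.fit(train_tokens, train_labels, epochs=5, batch_size=32)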
def deepmoji_architecture(nb_classes, nb_tokens, maxlen, feature_output=False, embed_dropout_rate=0, final_dropout_rate=0, embed_l2=1E-6, return_attention=False):
    """
    Returns the DeepMoji architecture uninitialized and
    without using the pretrained model weights.
    # Arguments:
        nb_classes: Number of classes in the dataset.
        nb_tokens: Number of tokens in the dataset (i.e. vocabulary size).
        maxlen: Maximum length of a sentence (given in tokens).
        feature_output: If True the model returns the penultimate
                        feature vector rather than Softmax probabilities
                        (defaults to False).
        embed_dropout_rate: Dropout rate for the embedding layer.
        final_dropout_rate: Dropout rate for the final Softmax layer.
        embed_l2: L2 regularization for the embedding layer.
    # Returns:
        Model with the given parameters.
    """
    # define embedding layer that turns word tokens into vectors
    # an activation function is used to bound the values of the embedding
    model_input = Input(shape=(maxlen,), dtype='int32')
    embed_reg = L1L2(l2=embed_l2) if embed_l2 != 0 else None
    embed = Embedding(input_dim=nb_tokens,
                      output_dim=256,
                      mask_zero=True,
                      input_length=maxlen,
                      embeddings_regularizer=embed_reg,
                      name='embedding')
    x = embed(model_input)
    x = Activation('tanh')(x)
    # entire embedding channels are dropped out instead of the
    # normal Keras embedding dropout, which drops all channels for entire words
    # many of the datasets contain so few words that losing one or more words can alter the emotions completely
    if embed_dropout_rate != 0:
        embed_drop = SpatialDropout1D(embed_dropout_rate, name='embed_drop')
        x = embed_drop(x)
    # skip-connection from embedding to output eases gradient flow and allows access to lower-level features
    # the ordering of the merge matters for consistency with the pretrained model
    lstm_0_output = Bidirectional(LSTM(512, return_sequences=True), name="bi_lstm_0")(x)
    lstm_1_output = Bidirectional(LSTM(512, return_sequences=True), name="bi_lstm_1")(lstm_0_output)
    x = concatenate([lstm_1_output, lstm_0_output, x])
    # if return_attention is True in AttentionWeightedAverage, an additional tensor
    # representing the weight at each timestep is returned
    weights = None
    x = AttentionWeightedAverage(name='attlayer', return_attention=return_attention)(x)
    if return_attention:
        x, weights = x
    if not feature_output:
        # output class probabilities
        if final_dropout_rate != 0:
            x = Dropout(final_dropout_rate)(x)
        if nb_classes > 2:
            outputs = [Dense(nb_classes, activation='softmax', name='softmax')(x)]
        else:
            outputs = [Dense(1, activation='sigmoid', name='softmax')(x)]
    else:
        # output penultimate feature vector
        outputs = [x]
    if return_attention:
        # add the attention weights to the outputs if required
        outputs.append(weights)
    return Model(inputs=[model_input], outputs=outputs, name="DeepMoji")
def load_specific_weights(model, weight_path, exclude_names=[], extend_embedding=0, verbose=True):
    """ Loads model weights from the given file path, excluding any
        given layers.
    # Arguments:
        model: Model whose weights should be loaded.
        weight_path: Path to file containing model weights.
        exclude_names: List of layer names whose weights should not be loaded.
        extend_embedding: Number of new words being added to vocabulary.
        verbose: Verbosity flag.
    # Raises:
        ValueError if the file at weight_path does not exist.
    """
    if not exists(weight_path):
        raise ValueError('ERROR (load_weights): The weights file at {} does '
                         'not exist. Refer to the README for instructions.'
                         .format(weight_path))
    if extend_embedding and 'embedding' in exclude_names:
        raise ValueError('ERROR (load_weights): Cannot extend a vocabulary '
                         'without loading the embedding weights.')
    # Copy only weights from the temporary model that are wanted
    # for the specific task (e.g. the Softmax is often ignored)
    layer_weights = get_weights_from_hdf5(weight_path)
    for l_name, weight_names, weight_values in layer_weights:
        if l_name in exclude_names:
            if verbose:
                print('Ignoring weights for {}'.format(l_name))
            continue
        try:
            model_l = model.get_layer(name=l_name)
        except ValueError:
            raise ValueError("Weights had layer {},".format(l_name) +
                             " but could not find this layer in model.")
        if verbose:
            print('Loading weights for {}'.format(l_name))
        # extend embedding layer to allow new randomly initialized words
        # if requested. Otherwise, just load the weights for the layer.
        if type(model_l) is Embedding and extend_embedding > 0:
            comb_weights = append_to_embedding(weight_values,
                                               model_l.get_weights())
            model_l.set_weights(comb_weights)
            if verbose:
                print('Extended vocabulary for embedding layer ' +
                      'from {} to {} tokens.'.format(
                          NB_TOKENS, NB_TOKENS + extend_embedding))
        else:
            model_l.set_weights(weight_values)
def append_to_embedding(pretrain_weights, random_init_weights):
    """ Uses pretrained weights for the tokens already in the vocabulary.
        Remaining weights will be left with the random initialization. """
    pretrain_weights = deepcopy(pretrain_weights)
    if type(pretrain_weights) == list:
        pretrain_weights = pretrain_weights[0]
    if type(random_init_weights) == list:
        random_init_weights = random_init_weights[0]
    nb_old_tokens = np.shape(pretrain_weights)[0]
    random_init_weights[:nb_old_tokens] = pretrain_weights
    # must be returned as a list to be properly inserted into Keras model
    return [random_init_weights]
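# A small worked example of the merge above (shapes are illustrative): the
# first rows of the randomly initialized matrix are overwritten with the
# pretrained values, the extra rows keep their random initialization.
#
#     pre = np.zeros((5, 3))   # stand-in for pretrained embedding weights
#     new = np.ones((7, 3))    # stand-in for the extended random init
#     merged = append_to_embedding([pre], [new])[0]
#     # merged[:5] is all zeros (pretrained), merged[5:] is all ones (random)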
def get_weights_from_hdf5(filepath):
    """ Loads the weights from a saved Keras model into numpy arrays.
        The weights are saved using Keras 2.0 so we don't need all the
        conversion functionality for handling old weights.
    """
    with h5py.File(filepath, mode='r') as f:
        layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]
        layer_weights = []
        for l_name in layer_names:
            g = f[l_name]
            weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]
            weight_values = [g[weight_name][:] for weight_name in weight_names]
            if len(weight_values):
                layer_weights.append([l_name, weight_names, weight_values])
        return layer_weights

#
# This is Seisflows
#
# See LICENCE file
#
###############################################################################
# Import system modules
import sys
# Import Numpy and Obspy
import numpy as np
import obspy
# Local imports
from seisflows.tools import msg, unix
from seisflows.tools.tools import exists, getset
from seisflows.config import ParameterError
from seisflows.plugins import adjoint, misfit, readers, writers
from seisflows.tools import signal
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
class base(object):
    """ Data preprocessing class
      Provides data processing functions for seismic traces, with options for
      data misfit, filtering, normalization and muting
    """
    def check(self):
        """ Checks parameters and paths
        """
        # used for inversion
        if 'MISFIT' not in PAR:
            setattr(PAR, 'MISFIT', None)
        # used for migration
        if 'BACKPROJECT' not in PAR:
            setattr(PAR, 'BACKPROJECT', None)
        # data file format
        if 'FORMAT' not in PAR:
            raise ParameterError(PAR, 'FORMAT')
        # data normalization option
        if 'NORMALIZE' not in PAR:
            setattr(PAR, 'NORMALIZE', None)
        # data muting option
        if 'MUTE' not in PAR:
            setattr(PAR, 'MUTE', None)
        # data filtering option
        if 'FILTER' not in PAR:
            setattr(PAR, 'FILTER', None)
        # assertions
        if PAR.FORMAT not in dir(readers):
            print(msg.ReaderError)
            raise ParameterError()
        if PAR.FORMAT not in dir(writers):
            print(msg.WriterError)
            raise ParameterError()
        self.check_filter()
        self.check_mute()
        self.check_normalize()
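    # A sketch of the preprocessing-related entries a parameters file might
    # contain (names follow the checks above; values are illustrative only
    # and assume the corresponding reader/writer and misfit plugins exist):
    #
    #     MISFIT = 'Waveform'
    #     FORMAT = 'su'
    #     FILTER = 'Bandpass'
    #     FREQMIN = 0.01
    #     FREQMAX = 0.1
    #     MUTE = ['MuteShortOffsets']
    #     MUTE_SHORT_OFFSETS_DIST = 500.
    #     NORMALIZE = ['NormalizeTracesL2']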
    def setup(self):
        """ Sets up data preprocessing machinery
        """
        # define misfit function and adjoint trace generator
        if PAR.MISFIT:
            self.misfit = getattr(misfit, PAR.MISFIT)
            self.adjoint = getattr(adjoint, PAR.MISFIT)
        elif PAR.BACKPROJECT:
            self.adjoint = getattr(adjoint, PAR.BACKPROJECT)
        # define seismic data reader and writer
        self.reader = getattr(readers, PAR.FORMAT)
        self.writer = getattr(writers, PAR.FORMAT)
    def prepare_eval_grad(self, path='.'):
        """
         Prepares solver for gradient evaluation by writing residuals and
         adjoint traces
         :input path: directory containing observed and synthetic seismic data
        """
        solver = sys.modules['seisflows_solver']
        for filename in solver.data_filenames:
            obs = self.reader(path+'/'+'traces/obs', filename)
            syn = self.reader(path+'/'+'traces/syn', filename)
            # process observations
            obs = self.apply_filter(obs)
            obs = self.apply_mute(obs)
            obs = self.apply_normalize(obs)
            # process synthetics
            syn = self.apply_filter(syn)
            syn = self.apply_mute(syn)
            syn = self.apply_normalize(syn)
            if PAR.MISFIT:
                self.write_residuals(path, syn, obs)
            self.write_adjoint_traces(path+'/'+'traces/adj', syn, obs,
                                      filename)
    def write_residuals(self, path, syn, obs):
        """
        Computes residuals
        :input path: location where the residuals file will be written
        :input syn: obspy Stream object containing synthetic data
        :input obs: obspy Stream object containing observed data
        """
        nt, dt, _ = self.get_time_scheme(syn)
        nn, _ = self.get_network_size(syn)
        residuals = []
        for ii in range(nn):
            residuals.append(self.misfit(syn[ii].data, obs[ii].data, nt, dt))
        filename = path+'/'+'residuals'
        if exists(filename):
            residuals.extend(list(np.loadtxt(filename)))
        np.savetxt(filename, residuals)
    def sum_residuals(self, files):
        """
        Sums squares of residuals
        :input files: list of single-column text files containing residuals
        :output total_misfit: sum of squares of residuals
        """
        total_misfit = 0.
        for filename in files:
            total_misfit += np.sum(np.loadtxt(filename)**2.)
        return total_misfit
    def write_adjoint_traces(self, path, syn, obs, channel):
        """
        Writes "adjoint traces" required for gradient computation
        :input path: location "adjoint traces" will be written
        :input syn: obspy Stream object containing synthetic data
        :input obs: obspy Stream object containing observed data
        :input channel: channel or component code used by writer
        """
        nt, dt, _ = self.get_time_scheme(syn)
        nn, _ = self.get_network_size(syn)
        adj = syn
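        # note: adj is the same Stream object as syn, so the synthetic
        # traces are overwritten in place with the adjoint traces below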
        for ii in range(nn):
            adj[ii].data = self.adjoint(syn[ii].data, obs[ii].data, nt, dt)
        self.writer(adj, path, channel)
    # Signal processing
    def apply_filter(self, traces):
        if not PAR.FILTER:
            return traces
        elif PAR.FILTER == 'Bandpass':
            for tr in traces:
                tr.detrend('demean')
                tr.detrend('linear')
                tr.taper(0.05, type='hann')
                tr.filter('bandpass',
                          zerophase=True,
                          freqmin=PAR.FREQMIN,
                          freqmax=PAR.FREQMAX)
        elif PAR.FILTER == 'Lowpass':
            for tr in traces:
                tr.detrend('demean')
                tr.detrend('linear')
                tr.taper(0.05, type='hann')
                tr.filter('lowpass',
                          zerophase=True,
                          freq=PAR.FREQ)
        elif PAR.FILTER == 'Highpass':
            for tr in traces:
                tr.detrend('demean')
                tr.detrend('linear')
                tr.taper(0.05, type='hann')
                tr.filter('highpass',
                          zerophase=True,
                          freq=PAR.FREQ)
        else:
            raise ParameterError()
        return traces
    def apply_mute(self, traces):
        if not PAR.MUTE:
            return traces
        if 'MuteEarlyArrivals' in PAR.MUTE:
            traces = signal.mute_early_arrivals(traces,
                PAR.MUTE_EARLY_ARRIVALS_SLOPE,  # (units: time/distance)
                PAR.MUTE_EARLY_ARRIVALS_CONST,  # (units: time)
                self.get_time_scheme(traces),
                self.get_source_coords(traces),
                self.get_receiver_coords(traces))
        if 'MuteLateArrivals' in PAR.MUTE:
            traces = signal.mute_late_arrivals(traces,
                PAR.MUTE_LATE_ARRIVALS_SLOPE,  # (units: time/distance)
                PAR.MUTE_LATE_ARRIVALS_CONST,  # (units: time)
                self.get_time_scheme(traces),
                self.get_source_coords(traces),
                self.get_receiver_coords(traces))
        if 'MuteShortOffsets' in PAR.MUTE:
            traces = signal.mute_short_offsets(traces,
                PAR.MUTE_SHORT_OFFSETS_DIST,
                self.get_source_coords(traces),
                self.get_receiver_coords(traces))
        if 'MuteLongOffsets' in PAR.MUTE:
            traces = signal.mute_long_offsets(traces,
                PAR.MUTE_LONG_OFFSETS_DIST,
                self.get_source_coords(traces),
                self.get_receiver_coords(traces))
        return traces
    def apply_normalize(self, traces):
        if not PAR.NORMALIZE:
            return traces
        if 'NormalizeEventsL1' in PAR.NORMALIZE:
            # normalize event by L1 norm of all traces
            w = 0.
            for tr in traces:
                w += np.linalg.norm(tr.data, ord=1)
            for tr in traces:
                tr.data /= w
        elif 'NormalizeEventsL2' in PAR.NORMALIZE:
            # normalize event by L2 norm of all traces
            w = 0.
            for tr in traces:
                w += np.linalg.norm(tr.data, ord=2)
            for tr in traces:
                tr.data /= w
        if 'NormalizeTracesL1' in PAR.NORMALIZE:
            # normalize each trace by its L1 norm
            for tr in traces:
                w = np.linalg.norm(tr.data, ord=1)
                if w > 0:
                    tr.data /= w
        elif 'NormalizeTracesL2' in PAR.NORMALIZE:
            # normalize each trace by its L2 norm
            for tr in traces:
                w = np.linalg.norm(tr.data, ord=2)
                if w > 0:
                    tr.data /= w
        return traces
    def apply_filter_backwards(self, traces):
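        # Filters the time-reversed traces: flip each trace, apply the
        # configured filter, then flip back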
        for tr in traces:
            tr.data = np.flip(tr.data)
        traces = self.apply_filter(traces)
        for tr in traces:
            tr.data = np.flip(tr.data)
        return traces
    # Additional parameter checking
    def check_filter(self):
        """ Checks filter settings
        """
        assert getset(PAR.FILTER) < set([
            'Bandpass',
            'Lowpass',
            'Highpass'])
        if PAR.FILTER == 'Bandpass':
            if 'FREQMIN' not in PAR:
                raise ParameterError('FREQMIN')
            if 'FREQMAX' not in PAR:
                raise ParameterError('FREQMAX')
            assert 0 < PAR.FREQMIN
            assert PAR.FREQMIN < PAR.FREQMAX
            assert PAR.FREQMAX < np.inf
        elif PAR.FILTER == 'Lowpass':
            raise NotImplementedError
            if 'FREQ' not in PAR:
                raise ParameterError('FREQ')
            assert 0 < PAR.FREQ <= np.inf
        elif PAR.FILTER == 'Highpass':
            raise NotImplementedError
            if 'FREQ' not in PAR:
                raise ParameterError('FREQ')
            assert 0 <= PAR.FREQ < np.inf
    def check_mute(self):
        """ Checks mute settings
        """
        if not PAR.MUTE:
            return
        assert getset(PAR.MUTE) <= set([
            'MuteEarlyArrivals',
            'MuteLateArrivals',
            'MuteShortOffsets',
            'MuteLongOffsets'])
        if 'MuteEarlyArrivals' in PAR.MUTE:
            assert 'MUTE_EARLY_ARRIVALS_SLOPE' in PAR
            assert 'MUTE_EARLY_ARRIVALS_CONST' in PAR
            assert PAR.MUTE_EARLY_ARRIVALS_SLOPE >= 0.
        if 'MuteLateArrivals' in PAR.MUTE:
            assert 'MUTE_LATE_ARRIVALS_SLOPE' in PAR
            assert 'MUTE_LATE_ARRIVALS_CONST' in PAR
            assert PAR.MUTE_LATE_ARRIVALS_SLOPE >= 0.
        if 'MuteShortOffsets' in PAR.MUTE:
            assert 'MUTE_SHORT_OFFSETS_DIST' in PAR
            assert 0 < PAR.MUTE_SHORT_OFFSETS_DIST
        if 'MuteLongOffsets' in PAR.MUTE:
            assert 'MUTE_LONG_OFFSETS_DIST' in PAR
            assert 0 < PAR.MUTE_LONG_OFFSETS_DIST
        if 'MuteShortOffsets' not in PAR.MUTE:
            setattr(PAR, 'MUTE_SHORT_OFFSETS_DIST', 0.)
        if 'MuteLongOffsets' not in PAR.MUTE:
            setattr(PAR, 'MUTE_LONG_OFFSETS_DIST', 0.)
    def check_normalize(self):
        assert getset(PAR.NORMALIZE) < set([
            'NormalizeTracesL1',
            'NormalizeTracesL2',
            'NormalizeEventsL1',
            'NormalizeEventsL2'])
    # Utility functions
    def get_time_scheme(self, traces):
        """ FIXME: extract time scheme from trace headers rather than
            parameters file.
            Note from Alexis Bottero : it is actually better like this in
            my opinion because this allows for longer traces to be processed.
            Indeed, in su format only 2 bytes are dedicated to the number of
            samples which is supposed to be stored as an unsigned int. The
            maximum NT which can be stored in the header is then 32762 whereas
            there is no limit in principle.
        """
        nt = PAR.NT
        dt = PAR.DT
        t0 = 0.
        return nt, dt, t0
    def get_network_size(self, traces):
        nrec = len(traces)
        nsrc = 1
        return nrec, nsrc
    def get_receiver_coords(self, traces):
        if PAR.FORMAT in ['SU', 'su']:
            rx = []
            ry = []
            rz = []
            for trace in traces:
                rx += [trace.stats.su.trace_header.group_coordinate_x]
                ry += [trace.stats.su.trace_header.group_coordinate_y]
                rz += [0.]
            return rx, ry, rz
        else:
            raise NotImplementedError
    def get_source_coords(self, traces):
        if PAR.FORMAT in ['SU', 'su']:
            sx = []
            sy = []
            sz = []
            for trace in traces:
                sx += [trace.stats.su.trace_header.source_coordinate_x]
                sy += [trace.stats.su.trace_header.source_coordinate_y]
                sz += [0.]
            return sx, sy, sz
        else:
            raise NotImplementedError

from ..titanic import digital
from ..titanic import gmpmath
from ..titanic.ops import OP
class MPNum(digital.Digital):
    # must be implemented in subclasses
    @classmethod
    def _select_context(cls, *args, ctx=None):
        raise ValueError('virtual method: unimplemented')
    @classmethod
    def _round_to_context(cls, unrounded, ctx=None, strict=False):
        raise ValueError('virtual method: unimplemented')
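    # A hypothetical sketch of the contract subclasses fill in: pick a
    # rounding context from the operands, and round a result computed at
    # higher precision back into that context (MyFloat and default_ctx are
    # placeholders, not part of this module):
    #
    #     class MyFloat(MPNum):
    #         @classmethod
    #         def _select_context(cls, *args, ctx=None):
    #             return ctx if ctx is not None else default_ctx
    #         @classmethod
    #         def _round_to_context(cls, unrounded, ctx=None, strict=False):
    #             return cls(unrounded, ctx=ctx)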
    # most operations
    def add(self, other, ctx=None):
        ctx = self._select_context(self, other, ctx=ctx)
        result = gmpmath.compute(OP.add, self, other, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def sub(self, other, ctx=None):
        ctx = self._select_context(self, other, ctx=ctx)
        result = gmpmath.compute(OP.sub, self, other, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def mul(self, other, ctx=None):
        ctx = self._select_context(self, other, ctx=ctx)
        result = gmpmath.compute(OP.mul, self, other, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def div(self, other, ctx=None):
        ctx = self._select_context(self, other, ctx=ctx)
        result = gmpmath.compute(OP.div, self, other, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def sqrt(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.sqrt, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def fma(self, other1, other2, ctx=None):
        ctx = self._select_context(self, other1, other2, ctx=ctx)
        result = gmpmath.compute(OP.fma, self, other1, other2, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def neg(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.neg, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def copysign(self, other, ctx=None):
        ctx = self._select_context(self, other, ctx=ctx)
        result = gmpmath.compute(OP.copysign, self, other, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def fabs(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.fabs, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def fdim(self, other, ctx=None):
        # emulated
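        # fdim(x, y) = max(x - y, +0): compute the difference, then clamp
        # negative results to +0 (and never return negative zero)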
        ctx = self._select_context(self, other, ctx=ctx)
        result = gmpmath.compute(OP.sub, self, other, prec=ctx.p)
        zero = digital.Digital(negative=False, c=0, exp=0)
        if result < zero:
            return type(self)(negative=False, c=0, exp=0, inexact=False, rc=0)
        else:
            # never return negative zero
            rounded = self._round_to_context(result, ctx=ctx, strict=True)
            return type(self)(rounded, negative=False)
    def fmax(self, other, ctx=None):
        # emulated
        ctx = self._select_context(self, other, ctx=ctx)
        if self.isnan:
            return self._round_to_context(other, ctx=ctx, strict=False)
        elif other.isnan:
            return self._round_to_context(self, ctx=ctx, strict=False)
        else:
            return self._round_to_context(max(self, other), ctx=ctx, strict=False)
    def fmin(self, other, ctx=None):
        # emulated
        ctx = self._select_context(self, other, ctx=ctx)
        if self.isnan:
            return self._round_to_context(other, ctx=ctx, strict=False)
        elif other.isnan:
            return self._round_to_context(self, ctx=ctx, strict=False)
        else:
            return self._round_to_context(min(self, other), ctx=ctx, strict=False)
    def fmod(self, other, ctx=None):
        ctx = self._select_context(self, other, ctx=ctx)
        result = gmpmath.compute(OP.fmod, self, other, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def remainder(self, other, ctx=None):
        ctx = self._select_context(self, other, ctx=ctx)
        result = gmpmath.compute(OP.remainder, self, other, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def ceil(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.ceil, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def floor(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.floor, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def nearbyint(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.nearbyint, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def round(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.round, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def trunc(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.trunc, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def acos(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.acos, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def acosh(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.acosh, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def asin(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.asin, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def asinh(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.asinh, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def atan(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.atan, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def atan2(self, other, ctx=None):
        ctx = self._select_context(self, other, ctx=ctx)
        result = gmpmath.compute(OP.atan2, self, other, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def atanh(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.atanh, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def cos(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.cos, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def cosh(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.cosh, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def sin(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.sin, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def sinh(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.sinh, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def tan(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.tan, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def tanh(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.tanh, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def exp_(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.exp, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def exp2(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.exp2, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def expm1(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.expm1, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def log(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.log, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def log10(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.log10, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def log1p(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.log1p, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def log2(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.log2, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def cbrt(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.cbrt, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def hypot(self, other, ctx=None):
        ctx = self._select_context(self, other, ctx=ctx)
        result = gmpmath.compute(OP.hypot, self, other, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def pow(self, other, ctx=None):
        ctx = self._select_context(self, other, ctx=ctx)
        if other.is_zero():
            # avoid possibly passing nan to gmpmath.compute
            return type(self)(negative=False, c=1, exp=0, inexact=False, rc=0)
        result = gmpmath.compute(OP.pow, self, other, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def erf(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.erf, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def erfc(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.erfc, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def lgamma(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.lgamma, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def tgamma(self, ctx=None):
        ctx = self._select_context(self, ctx=ctx)
        result = gmpmath.compute(OP.tgamma, self, prec=ctx.p)
        return self._round_to_context(result, ctx=ctx, strict=True)
    def isfinite(self):
        return not (self.isinf or self.isnan)
    # isinf and isnan are properties
    # isnormal is implementation specific - override if necessary
    def isnormal(self):
        return not (
            self.is_zero()
            or self.isinf
            or self.isnan
        )
    def signbit(self):
        return self.negative

import logging
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.utils.encoding import smart_unicode
from restlib2.http import codes
from restlib2.params import StrParam, IntParam, BoolParam
from modeltree.tree import MODELTREE_DEFAULT_ALIAS, trees
from avocado.events import usage
from avocado.query import pipeline
from .base import FieldBase, is_field_orphaned
from ..pagination import PaginatorResource, PaginatorParametizer
from ...links import patch_response, reverse_tmpl
log = logging.getLogger(__name__)
class FieldValuesParametizer(PaginatorParametizer):
    aware = BoolParam(False)
    limit = IntParam(10)
    tree = StrParam(MODELTREE_DEFAULT_ALIAS, choices=trees)
    processor = StrParam('default', choices=pipeline.query_processors)
    query = StrParam()
    random = IntParam()
class FieldValues(FieldBase, PaginatorResource):
    """Field Values Resource
    This resource can be overridden for any field to use a more
    performant search implementation.
    """
    parametizer = FieldValuesParametizer
    def get_base_values(self, request, instance, params):
        "Returns the base queryset for this field."
        # The `aware` flag toggles whether the base queryset is restricted
        # to the applied context or left unfiltered
        if params['aware']:
            context = self.get_context(request)
        else:
            context = self.get_context(request, attrs={})
        return context.apply(queryset=instance.model.objects.all())
    def get_all_values(self, request, instance, queryset):
        "Returns all distinct values for this field."
        results = []
        for value, label in instance.choices(queryset=queryset):
            results.append({
                'label': label,
                'value': value,
            })
        return results
    def get_search_values(self, request, instance, query, queryset):
        """
        Performs a search on the underlying data for a field.
        This method can be overridden to use an alternate search
        implementation.
        """
        results = []
        value_labels = instance.value_labels(queryset=queryset)
        for value in instance.search(query, queryset=queryset):
            results.append({
                'label': value_labels.get(value, smart_unicode(value)),
                'value': value,
            })
        return results
    def get_random_values(self, request, instance, random, queryset):
        """
        Returns a random set of value/label pairs.
        This is useful for pre-populating documents or form fields with
        example data.
        """
        values = instance.random(random, queryset=queryset)
        results = []
        for value in values:
            results.append({
                'label': instance.get_label(value, queryset=queryset),
                'value': value,
            })
        return results
    def get_link_templates(self, request):
        uri = request.build_absolute_uri
        return {
            'parent': reverse_tmpl(
                uri, 'serrano:field', {'pk': (int, 'parent_id')})
        }
    def get(self, request, pk):
        instance = self.get_object(request, pk=pk)
        if is_field_orphaned(instance):
            data = {
                'message': 'Orphaned fields do not support values calls.'
            }
            return self.render(
                request, data, status=codes.unprocessable_entity)
        params = self.get_params(request)
        if params['aware']:
            context = self.get_context(request)
        else:
            context = None
        QueryProcessor = pipeline.query_processors[params['processor']]
        processor = QueryProcessor(tree=instance.model, context=context)
        queryset = processor.get_queryset(request=request)
        if params['random']:
            # In the case that the queryset contains a population smaller than
            # the number of random items being requested, a ValueError will be
            # triggered. Instead of passing the error on to the client, we
            # simply return all the possible values.
            try:
                return self.get_random_values(
                    request, instance, params['random'], queryset)
            except ValueError:
                return instance.values(queryset=queryset)
        page = params['page']
        limit = params['limit']
        # If a query term is supplied, perform the icontains search.
        if params['query']:
            usage.log('items', instance=instance, request=request, data={
                'query': params['query'],
            })
            values = self.get_search_values(
                request, instance, params['query'], queryset)
        else:
            values = self.get_all_values(request, instance, queryset)
        # No page specified, return everything.
        if page is None:
            return values
        paginator = self.get_paginator(values, limit=limit)
        page = paginator.page(page)
        # Get paginator-based response.
        data = self.get_page_response(request, paginator, page)
        data.update({
            'items': page.object_list,
        })
        # Add links.
        path = reverse('serrano:field-values', kwargs={'pk': pk})
        links = self.get_page_links(request, path, page, extra=params)
        templates = self.get_link_templates(request)
        response = self.render(request, content=data)
        return patch_response(request, response, links, templates)
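    # A sketch of a paginated GET response body (keys other than 'items' come
    # from the paginator; values are illustrative only):
    #
    #     {
    #         ...pagination metadata...,
    #         "items": [
    #             {"label": "Female", "value": "F"},
    #             {"label": "Male", "value": "M"}
    #         ]
    #     }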
    def post(self, request, pk):
        instance = self.get_object(request, pk=pk)
        params = self.get_params(request)
        if not request.data:
            data = {
                'message': 'Error parsing data',
            }
            return self.render(request, data,
                               status=codes.unprocessable_entity)
        if isinstance(request.data, dict):
            array = [request.data]
        else:
            array = request.data
        values = []
        labels = []
        array_map = {}
        # Separate out the values and labels for the lookup. Track indexes
        # to maintain the order of the array.
        for i, datum in enumerate(array):
            # Value takes precedence over label if supplied.
            if 'value' in datum:
                array_map[i] = 'value'
                values.append(datum['value'])
            elif 'label' in datum:
                array_map[i] = 'label'
                labels.append(datum['label'])
            else:
                data = {
                    'message': 'Error parsing value or label'
                }
                return self.render(request, data,
                                   status=codes.unprocessable_entity)
        value_field_name = instance.field_name
        label_field_name = instance.label_field.name
        # Note, this returns a context-aware or naive queryset depending
        # on params. Get the value and label fields so they can be filled
        # in below.
        queryset = self.get_base_values(request, instance, params)\
            .values_list(value_field_name, label_field_name)
        lookup = Q()
        # Validate based on the label.
        if labels:
            lookup |= Q(**{'{0}__in'.format(label_field_name): labels})
        if values:
            lookup |= Q(**{'{0}__in'.format(value_field_name): values})
        results = queryset.filter(lookup)
        value_labels = dict(results)
        label_values = dict([(v, k) for k, v in value_labels.items()])
        for i, datum in enumerate(array):
            if array_map[i] == 'label':
                valid = datum['label'] in label_values
                if valid:
                    value = label_values[datum['label']]
                else:
                    value = datum['label']
                datum['valid'] = valid
                datum['value'] = value
            else:
                valid = datum['value'] in value_labels
                if valid:
                    label = value_labels[datum['value']]
                else:
                    label = smart_unicode(datum['value'])
                datum['valid'] = valid
                datum['label'] = label
        usage.log('validate', instance=instance, request=request, data={
            'count': len(array),
        })
        # Return the augmented data.
        return request.data
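# A sketch of the validation round-trip performed by post() (values are
# illustrative): each posted item is echoed back with 'valid', 'value' and
# 'label' filled in.
#
#     POST body:  [{"value": "F"}, {"label": "Unknown"}]
#     response:   [{"value": "F", "label": "Female", "valid": true},
#                  {"label": "Unknown", "value": "Unknown", "valid": false}]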

#!/usr/bin/env python
"""
This module attempts to "component-ify" GT's Fastscape stream power erosion.
Created DEJH, March 2014.
"""
from __future__ import print_function
import numpy
import warnings
from landlab import ModelParameterDictionary, Component
from landlab.core.model_parameter_dictionary import MissingKeyError, \
    ParameterValueError
from landlab.utils.decorators import use_file_name_or_kwds
from landlab.field.scalar_data_fields import FieldError
from scipy.optimize import newton, fsolve
UNDEFINED_INDEX = -1
class FastscapeEroder(Component):
    '''
    This class uses the Braun-Willett Fastscape approach to calculate the
    amount of erosion at each node in a grid, following a stream power
    framework. This should allow it to be stable against larger timesteps
    than an explicit stream power scheme.
    Stream power erosion is implemented as::
        E = K * (rainfall_intensity*A)**m * S**n - threshold_sp,
    if K * A**m * S**n > threshold_sp, and::
        E = 0,
    if K * A**m * S**n <= threshold_sp.
    This module assumes you have already run
    :func:`landlab.components.flow_routing.route_flow_dn.FlowRouter.route_flow`
    in the same timestep. It looks for 'flow__upstream_node_order',
    'flow__link_to_receiver_node', 'drainage_area', 'flow__receiver_node', and
    'topographic__elevation' at the nodes in the grid. 'drainage_area' should
    be in area upstream, not volume (i.e., set runoff_rate=1.0 when calling
    FlowRouter.route_flow).
    The primary method of this class is :func:`run_one_step`.
    Construction::
        FastscapeEroder(grid, K_sp=None, m_sp=0.5, n_sp=1., threshold_sp=0.,
                        rainfall_intensity=1.)
    Parameters
    ----------
    grid : ModelGrid
        A grid.
    K_sp : float, array, or field name
        K in the stream power equation (units vary with other parameters).
    m_sp : float, optional
        m in the stream power equation (power on drainage area).
    n_sp : float, optional, ~ 0.5<n_sp<4.
        n in the stream power equation (power on slope).
        Performance will be VERY degraded if n < 1.
    threshold_sp : float, array, or field name
        The threshold stream power.
    rainfall_intensity : float; optional
        Modifying factor on drainage area to convert it to a true water
        volume flux in (m/time). i.e., E = K * (r_i*A)**m * S**n. For a time
        varying rainfall intensity, pass rainfall_intensity_if_used to
        `run_one_step`. For a spatially variable rainfall, use the
        StreamPowerEroder component.
    Examples
    --------
    >>> import numpy as np
    >>> from landlab import RasterModelGrid
    >>> from landlab import CLOSED_BOUNDARY, FIXED_VALUE_BOUNDARY
    >>> from landlab.components import FlowRouter
    >>> mg = RasterModelGrid((5, 5), 10.)
    >>> z = np.array([7.,  7.,  7.,  7.,  7.,
    ...               7.,  5., 3.2,  6.,  7.,
    ...               7.,  2.,  3.,  5.,  7.,
    ...               7.,  1., 1.9,  4.,  7.,
    ...               7.,  0.,  7.,  7.,  7.])
    >>> z = mg.add_field('node', 'topographic__elevation', z)
    >>> fr = FlowRouter(mg)
    >>> sp = FastscapeEroder(mg, K_sp=1.)
    >>> fr.run_one_step()
    >>> sp.run_one_step(dt=1.)
    >>> z  # doctest: +NORMALIZE_WHITESPACE
    array([ 7.        ,  7.        ,  7.        ,  7.        ,  7.        ,
            7.        ,  2.92996598,  2.02996598,  4.01498299,  7.        ,
            7.        ,  0.85993197,  1.87743897,  3.28268321,  7.        ,
            7.        ,  0.28989795,  0.85403051,  2.42701526,  7.        ,
            7.        ,  0.        ,  7.        ,  7.        ,  7.        ])
    >>> mg2 = RasterModelGrid((3, 7), 1.)
    >>> z = np.array(mg2.node_x**2.)
    >>> z = mg2.add_field('node', 'topographic__elevation', z)
    >>> mg2.status_at_node[mg2.nodes_at_left_edge] = FIXED_VALUE_BOUNDARY
    >>> mg2.status_at_node[mg2.nodes_at_top_edge] = CLOSED_BOUNDARY
    >>> mg2.status_at_node[mg2.nodes_at_bottom_edge] = CLOSED_BOUNDARY
    >>> mg2.status_at_node[mg2.nodes_at_right_edge] = CLOSED_BOUNDARY
    >>> fr2 = FlowRouter(mg2)
    >>> sp2 = FastscapeEroder(mg2, K_sp=0.1, m_sp=0., n_sp=2.,
    ...                       threshold_sp=2.)
    >>> fr2.run_one_step()
    >>> sp2.run_one_step(dt=10.)
    >>> z.reshape((3, 7))[1, :]  # doctest: +NORMALIZE_WHITESPACE
    array([  0.        ,   1.        ,   4.        ,   8.52493781,
            13.29039716,  18.44367965,  36.        ])
    >>> mg3 = RasterModelGrid((3, 7), 1.)
    >>> z = np.array(mg3.node_x**2.)
    >>> z = mg3.add_field('node', 'topographic__elevation', z)
    >>> mg3.status_at_node[mg3.nodes_at_left_edge] = FIXED_VALUE_BOUNDARY
    >>> mg3.status_at_node[mg3.nodes_at_top_edge] = CLOSED_BOUNDARY
    >>> mg3.status_at_node[mg3.nodes_at_bottom_edge] = CLOSED_BOUNDARY
    >>> mg3.status_at_node[mg3.nodes_at_right_edge] = CLOSED_BOUNDARY
    >>> fr3 = FlowRouter(mg3)
    >>> K_field = mg3.ones('node')  # K can be a field
    >>> sp3 = FastscapeEroder(mg3, K_sp=K_field, m_sp=1., n_sp=0.6,
    ...                       threshold_sp=mg3.node_x,
    ...                       rainfall_intensity=2.)
    >>> fr3.run_one_step()
    >>> sp3.run_one_step(1.)
    >>> z.reshape((3, 7))[1, :]  # doctest: +NORMALIZE_WHITESPACE
    array([  0.        ,   0.0647484 ,   0.58634455,   2.67253503,
             8.49212152,  20.92606987,  36.        ])
    >>> previous_z = z.copy()
    >>> sp3.run_one_step(1., rainfall_intensity_if_used=0.)
    >>> np.allclose(z, previous_z)
    True
    '''
    _name = 'FastscapeEroder'
    _input_var_names = (
        'topographic__elevation',
        'drainage_area',
        'flow__link_to_receiver_node',
        'flow__upstream_node_order',
        'flow__receiver_node',
    )
    _output_var_names = (
        'topographic__elevation',
    )
    _var_units = {
        'topographic__elevation': 'm',
        'drainage_area': 'm**2',
        'flow__link_to_receiver_node': '-',
        'flow__upstream_node_order': '-',
        'flow__receiver_node': '-',
    }
    _var_mapping = {
        'topographic__elevation': 'node',
        'drainage_area': 'node',
        'flow__link_to_receiver_node': 'node',
        'flow__upstream_node_order': 'node',
        'flow__receiver_node': 'node',
    }
    _var_doc = {
        'topographic__elevation': 'Land surface topographic elevation',
        'drainage_area':
            "Upstream accumulated surface area contributing to the node's "
            "discharge",
        'flow__link_to_receiver_node':
            'ID of link downstream of each node, which carries the discharge',
        'flow__upstream_node_order':
            'Node array containing downstream-to-upstream ordered list of '
            'node IDs',
        'flow__receiver_node':
            'Node array of receivers (node that receives flow from current '
            'node)',
    }
    @use_file_name_or_kwds
    def __init__(self, grid, K_sp=None, m_sp=0.5, n_sp=1., threshold_sp=0.,
                 rainfall_intensity=1., **kwds):
        """
        Initialize the Fastscape stream power component. Note: a timestep,
        dt, can no longer be supplied to this component through the input file.
        It must instead be passed directly to the run method.
        Parameters
        ----------
        grid : ModelGrid
            A grid.
        K_sp : float, array, or field name
            K in the stream power equation (units vary with other parameters).
        m_sp : float, optional
            m in the stream power equation (power on drainage area).
        n_sp : float, optional
            n in the stream power equation (power on slope).
        rainfall_intensity : float, optional
            Modifying factor on drainage area to convert it to a true water
            volume flux in (m/time). i.e., E = K * (r_i*A)**m * S**n
        """
        self._grid = grid
        self.K = K_sp  # overwritten below in special cases
        self.m = float(m_sp)
        self.n = float(n_sp)
        if type(threshold_sp) in (float, int):
            self.thresholds = float(threshold_sp)
        else:
            if type(threshold_sp) is str:
                self.thresholds = self.grid.at_node[threshold_sp]
            else:
                self.thresholds = threshold_sp
            assert self.thresholds.size == self.grid.number_of_nodes
        # make storage variables
        self.A_to_the_m = grid.zeros(at='node')
        self.alpha = grid.empty(at='node')
        self.alpha_by_flow_link_lengthtothenless1 = numpy.empty_like(
                                                        self.alpha)
        try:
            self.grid._diagonal_links_at_node  # calc number of diagonal links
        except AttributeError:
            pass  # was not a raster
        if self.K is None:
            raise ValueError('K_sp must be set as a float, node array, or ' +
                             'field name. It was None.')
        # now handle the inputs that could be float, array or field name:
        # some support here for old-style inputs
        if type(K_sp) is str:
            if K_sp == 'array':
                self.K = None
            else:
                self.K = self._grid.at_node[K_sp]
        elif type(K_sp) in (float, int):  # a float
            self.K = float(K_sp)
        elif len(K_sp) == self.grid.number_of_nodes:
            self.K = numpy.array(K_sp)
        else:
            raise TypeError('Supplied type of K_sp ' +
                            'was not recognised, or array was ' +
                            'not nnodes long!')
        if type(rainfall_intensity) is str:
            raise ValueError('This component can no longer handle ' +
                             'spatially variable rainfall. Use ' +
                             'StreamPowerEroder.')
            if rainfall_intensity == 'array':
                self._r_i = None
            else:
                self._r_i = self._grid.at_node[rainfall_intensity]
        elif type(rainfall_intensity) in (float, int):  # a float
            self._r_i = float(rainfall_intensity)
        elif len(rainfall_intensity) == self.grid.number_of_nodes:
            raise ValueError('This component can no longer handle ' +
                             'spatially variable rainfall. Use ' +
                             'StreamPowerEroder.')
            self._r_i = numpy.array(rainfall_intensity)
        else:
            raise TypeError('Supplied type of rainfall_' +
                            'intensity was not recognised!')
        # We now forbid changing of the field name
        if 'value_field' in kwds.keys():
            raise ValueError('This component can no longer support variable ' +
                             'field names. Use "topographic__elevation".')
    def erode(self, grid_in, dt=None, K_if_used=None, flooded_nodes=None,
              rainfall_intensity_if_used=None):
        """
        This method implements the stream power erosion, following the Braun-
        Willett (2013) implicit Fastscape algorithm. This should allow it to
        be stable against larger timesteps than an explicit stream power
        scheme.
        This driving method is now superseded by the new, standardized
        wrapper :func:`run_one_step`, but is retained for backwards
        compatibility.
        Set 'K_if_used' as a field name or nnodes-long array if you set K_sp as
        'array' during initialization.
        It returns the grid, in which it will have modified the value of
        *value_field*, as specified in component initialization.
        Parameters
        ----------
        grid_in : a grid
            This is a dummy argument maintained for component back-
            compatibility. It is superseded by the copy of the grid passed
            during initialization.
        dt : float
            Time-step size. If you are calling the deprecated function
            :func:`gear_timestep`, that method will supersede any value
            supplied here.
        K_if_used : array (optional)
            Set this to an array if you set K_sp to 'array' in your input file.
        flooded_nodes : ndarray of int (optional)
            IDs of nodes that are flooded and should have no erosion. If not
            provided but flow has still been routed across depressions, erosion
            may still occur beneath the apparent water level (though will
            always still be positive).
        rainfall_intensity_if_used : float or None (optional)
            Supply to drive this component with a time-varying spatially
            constant rainfall.
        Returns
        -------
        grid
            A reference to the grid.
        """
        self.alpha = numpy.zeros(self._grid.number_of_nodes)
        self.alpha_by_flow_link_lengthtothenless1 = numpy.zeros(self._grid.number_of_nodes)
        upstream_order_IDs = self._grid['node']['flow__upstream_node_order']
        z = self._grid['node']['topographic__elevation']
        defined_flow_receivers = numpy.not_equal(self._grid['node'][
            'flow__link_to_receiver_node'], UNDEFINED_INDEX)
        flow_link_lengths = self._grid._length_of_link_with_diagonals[
            self._grid['node']['flow__link_to_receiver_node'][
                defined_flow_receivers]]
        # make arrays from input the right size
        if type(self.K) is numpy.ndarray:
            K_here = self.K[defined_flow_receivers]
        else:
            K_here = self.K
        if rainfall_intensity_if_used is not None:
            assert type(rainfall_intensity_if_used) in (float, int)
            r_i_here = float(rainfall_intensity_if_used)
        else:
            r_i_here = self._r_i
        if dt is None:
            dt = self.dt
        assert dt is not None, ('Fastscape component could not find a dt to ' +
                                'use. Pass dt to the run_one_step() method.')
        if self.K is None:  # "old style" setting of array
            assert K_if_used is not None
            self.K = K_if_used
        numpy.power(self._grid['node']['drainage_area'], self.m,
                    out=self.A_to_the_m)
        self.alpha[defined_flow_receivers] = r_i_here**self.m * K_here * dt * \
            self.A_to_the_m[defined_flow_receivers] / flow_link_lengths
        flow_receivers = self._grid['node']['flow__receiver_node']
        n_nodes = upstream_order_IDs.size
        alpha = self.alpha
        # Handle flooded nodes, if any (no erosion there)
        if flooded_nodes is not None:
            alpha[flooded_nodes] = 0.
        else:
            reversed_flow = z < z[flow_receivers]
            # this check necessary if flow has been routed across depressions
            alpha[reversed_flow] = 0.
        self.alpha_by_flow_link_lengthtothenless1[
            defined_flow_receivers] = (alpha[defined_flow_receivers] /
                                       flow_link_lengths**(self.n - 1.))
        alpha_divided = self.alpha_by_flow_link_lengthtothenless1
        n = float(self.n)
        threshdt = self.thresholds * dt
        if type(self.thresholds) is float:
            from .cfuncs import erode_with_link_alpha_fixthresh
            erode_with_link_alpha_fixthresh(upstream_order_IDs, flow_receivers,
                                            threshdt, alpha_divided, n, z)
        else:
            from .cfuncs import erode_with_link_alpha_varthresh
            erode_with_link_alpha_varthresh(upstream_order_IDs, flow_receivers,
                                            threshdt, alpha_divided, n, z)
            # # This replicates the cython for testing:
            # for i in range(upstream_order_IDs.size):
            #     src_id = upstream_order_IDs[i]
            #     dst_id = flow_receivers[src_id]
            #     thresh = threshdt[i]
            #     if src_id != dst_id:
            #         next_z = z[src_id]
            #         prev_z = 0.
            #         while True:
            #         #for j in range(2):
            #             z_diff = next_z - z[dst_id]
            #             f = alpha_divided[src_id] * pow(z_diff, n - 1.)
            #             # if z_diff -> 0, pow -> nan (in reality, inf)
            #             # print (f, prev_z, next_z, z_diff, z[dst_id])
            #             next_z = next_z - ((next_z - z[src_id] + (
            #                 f*z_diff - thresh).clip(0.)) / (1. + n * f))
            #             if next_z < z[dst_id]:
            #                 next_z = z[dst_id] + 1.e-15
            #                 # ^maintain connectivity
            #             if next_z != 0.:
            #                 if (numpy.fabs((next_z - prev_z)/next_z) <
            #                     1.48e-08) or (n == 1.):
            #                     break
            #             else:
            #                 break
            #             prev_z = next_z
            #         if next_z < z[src_id]:
            #             z[src_id] = next_z
        return self._grid
    def run_one_step(self, dt, flooded_nodes=None,
                     rainfall_intensity_if_used=None, **kwds):
        """
        This method implements the stream power erosion across one time
        interval, dt, following the Braun-Willett (2013) implicit Fastscape
        algorithm.
        This follows Landlab's standardized component design, and supersedes
        the old driving method :func:`erode`.
        Parameters
        ----------
        dt : float
            Time-step size
        flooded_nodes : ndarray of int (optional)
            IDs of nodes that are flooded and should have no erosion. If not
            provided but flow has still been routed across depressions, erosion
            may still occur beneath the apparent water level (though will
            always still be positive).
        rainfall_intensity_if_used : float or None (optional)
            Supply to drive this component with a time-varying spatially
            constant rainfall.
        """
        self.erode(grid_in=self._grid, dt=dt, flooded_nodes=flooded_nodes,
                   rainfall_intensity_if_used=rainfall_intensity_if_used)
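

if __name__ == "__main__":
    # Minimal usage sketch, not part of the component itself. It assumes this
    # class is exposed as landlab's FastscapeEroder and that a flow-accumulation
    # component fills the 'drainage_area' and 'flow__*' fields read by erode();
    # exact constructor signatures vary between landlab versions.
    from landlab import RasterModelGrid
    from landlab.components import FlowAccumulator, FastscapeEroder

    mg = RasterModelGrid((5, 5), xy_spacing=10.0)
    z = mg.add_zeros("topographic__elevation", at="node")
    z += mg.node_x * 0.01  # gentle initial slope so flow has a direction
    fa = FlowAccumulator(mg, flow_director="D8")
    fsc = FastscapeEroder(mg, K_sp=1e-5, m_sp=0.5, n_sp=1.0)
    for _ in range(10):
        fa.run_one_step()
        fsc.run_one_step(dt=1000.0)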
 | |
| 
	import logging
import os
import shutil
import tempfile
from crawler_exceptions import CrawlError, CrawlUnsupportedPackageManager
from utils import osinfo
from utils.features import PackageFeature
from utils.misc import subprocess_run
logger = logging.getLogger('crawlutils')
def get_dpkg_packages(
        root_dir='/',
        dbpath='var/lib/dpkg',
        installed_since=0):
    if os.path.isabs(dbpath):
        logger.warning(
            'dbpath: ' +
            dbpath +
            ' is an absolute path. Ignoring prefix: ' +
            root_dir +
            '.')
    # Prepend root_dir so dbpath is resolved inside the target root.
    dbpath = os.path.join(root_dir, dbpath)
    output = subprocess_run(['dpkg-query', '-W',
                             '--admindir={0}'.format(dbpath),
                             '-f=${Package}|${Version}'
                             '|${Architecture}|${Installed-Size}\n'],
                            shell=False)
    dpkglist = output.strip('\n')
    if dpkglist:
        for dpkginfo in dpkglist.split('\n'):
            (name, version, architecture, size) = dpkginfo.split(r'|')
            # dpkg does not provide any installtime field
            # feature_key = '{0}/{1}'.format(name, version) -->
            # changed to below per Suriya's request
            feature_key = '{0}'.format(name, version)
            yield (feature_key, PackageFeature(None, name,
                                               size, version,
                                               architecture))
def get_rpm_packages(
        root_dir='/',
        dbpath='var/lib/rpm',
        installed_since=0,
        reload_needed=False):
    if os.path.isabs(dbpath):
        logger.warning(
            'dbpath: ' +
            dbpath +
            ' is an absolute path. Ignoring prefix: ' +
            root_dir +
            '.')
    # Prepend root_dir so dbpath is resolved inside the target root.
    dbpath = os.path.join(root_dir, dbpath)
    try:
        if reload_needed:
            reloaded_db_dir = tempfile.mkdtemp()
            _rpm_reload_db(root_dir, dbpath, reloaded_db_dir)
            dbpath = reloaded_db_dir
        output = subprocess_run(['rpm',
                                 '--dbpath',
                                 dbpath,
                                 '-qa',
                                 '--queryformat',
                                 '%{installtime}|%{name}|%{version}'
                                 '-%{release}|%{arch}|%{size}\n'],
                                shell=False,
                                ignore_failure=True)
        # We ignore failures because sometimes rpm returns rc=1 but still
        # outputs all the data.
        rpmlist = output.strip('\n')
    finally:
        if reload_needed:
            logger.debug('Deleting directory: %s' % (reloaded_db_dir))
            shutil.rmtree(reloaded_db_dir)
    if rpmlist:
        for rpminfo in rpmlist.split('\n'):
            (installtime, name, version, architecture, size) = \
                rpminfo.split(r'|')
            """
            if int(installtime) <= installed_since: --> this
            barfs for sth like: 1376416422. Consider try: xxx
            except ValueError: pass
            """
            if installtime <= installed_since:
                continue
            """
            feature_key = '{0}/{1}'.format(name, version) -->
            changed to below per Suriya's request
            """
            feature_key = '{0}'.format(name, version)
            yield (feature_key,
                   PackageFeature(installtime,
                                  name, size, version, architecture))
def _rpm_reload_db(
        root_dir='/',
        dbpath='var/lib/rpm',
        reloaded_db_dir='/tmp/'):
    """
    Dumps and reloads the rpm database.
    Returns the path to the new rpm database, or raises RuntimeError if the
    dump and load commands failed.
    """
    try:
        dump_dir = tempfile.mkdtemp()
        subprocess_run(['/usr/bin/db_dump',
                        os.path.join(dbpath, 'Packages'),
                        '-f',
                        os.path.join(dump_dir, 'Packages')],
                       shell=False)
        subprocess_run(['/usr/bin/db_load',
                        '-f',
                        os.path.join(dump_dir, 'Packages'),
                        os.path.join(reloaded_db_dir, 'Packages')],
                       shell=False)
    finally:
        logger.debug('Deleting directory: %s' % (dump_dir))
        shutil.rmtree(dump_dir)
    return reloaded_db_dir
# from UK crawler codebase
def apk_parser(filename):
    try:
        db_contents = open(filename).read()
        packages = db_contents.split('\n\n')
        logger.debug('Found {} APK packages'.format(len(packages)))
        for package in packages:
            if package:
                attributes = package.split('\n')
                name = ""
                version = ""
                architecture = ""
                size = ""
                for attribute in attributes:
                    if (attribute.startswith('P:')):
                        name = attribute[2:]
                    elif (attribute.startswith('V:')):
                        version = attribute[2:]
                    elif (attribute.startswith('A:')):
                        architecture = attribute[2:]
                    elif (attribute.startswith('S:')):
                        size = attribute[2:]
                yield (name, PackageFeature(None, name,
                                            size, version,
                                            architecture))
    except IOError as e:
        logger.error('Failed to read APK database to obtain packages. '
                     'Check if %s is present.  [Exception: %s: %s]'
                     ' ' % (filename, type(e).__name__, e.strerror))
        raise
def get_apk_packages(
        root_dir='/',
        dbpath='lib/apk/db'):
    if os.path.isabs(dbpath):
        logger.warning(
            'dbpath: ' +
            dbpath +
            ' is an absolute path. Ignoring prefix: ' +
            root_dir +
            '.')
    # Prepend root_dir so dbpath is resolved inside the target root.
    dbpath = os.path.join(root_dir, dbpath)
    for feature_key, package_feature in apk_parser(
            os.path.join(dbpath, 'installed')):
        yield (feature_key, package_feature)
def crawl_packages(
        dbpath=None,
        root_dir='/',
        installed_since=0,
        reload_needed=True):
    # package attributes: ["installed", "name", "size", "version"]
    logger.debug('Crawling Packages')
    try:
        pkg_manager = _get_package_manager(root_dir)
        if pkg_manager == 'dpkg':
            dbpath = dbpath or 'var/lib/dpkg'
            for (key, feature) in get_dpkg_packages(
                    root_dir, dbpath, installed_since):
                yield (key, feature, 'package')
        elif pkg_manager == 'rpm':
            dbpath = dbpath or 'var/lib/rpm'
            for (key, feature) in get_rpm_packages(
                    root_dir, dbpath, installed_since, reload_needed):
                yield (key, feature, 'package')
        elif pkg_manager == 'apk':
            dbpath = dbpath or 'lib/apk/db'
            for (key, feature) in get_apk_packages(
                    root_dir, dbpath):
                yield (key, feature, 'package')
        else:
            logger.warning('Unsupported package manager for Linux distro')
    except Exception as e:
        logger.error('Error crawling packages',
                     exc_info=True)
        raise CrawlError(e)
def _get_package_manager(root_dir):
    result = osinfo.get_osinfo(mount_point=root_dir)
    if result:
        os_distro = result['os']
    else:
        raise CrawlUnsupportedPackageManager()
    pkg_manager = None
    if os_distro in ['ubuntu', 'debian']:
        pkg_manager = 'dpkg'
    elif os_distro in ['redhat', 'red hat', 'rhel', 'fedora', 'centos']:
        pkg_manager = 'rpm'
    elif os_distro in ['alpine']:
        pkg_manager = 'apk'
    elif os.path.exists(os.path.join(root_dir, 'var/lib/dpkg')):
        pkg_manager = 'dpkg'
    elif os.path.exists(os.path.join(root_dir, 'var/lib/rpm')):
        pkg_manager = 'rpm'
    return pkg_manager
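

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): crawl the local package
    # database of whatever distro this runs on and print what was found.
    for key, feature, feature_type in crawl_packages(root_dir='/'):
        print(feature_type, key, feature)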
 | |
| 
	import math
import os
import threading
from collections import defaultdict
from typing import Dict
import copy
from twisted.internet.address import IPv4Address
import bptc
from bptc.data.consensus import divide_rounds, decide_fame, find_order
from bptc.data.event import Event, Parents
from bptc.data.member import Member
from bptc.utils.toposort import toposort
from bptc.data.transaction import MoneyTransaction, TransactionStatus, PublishNameTransaction
class Hashgraph:
    """
    The Hashgraph - storing the events of all nodes
    """
    def __init__(self, me, debug_mode=False):
        self.lock = threading.RLock()
        # Member: A reference to the current user. For convenience (e.g. signing)
        self.me = me
        self.debug_mode = debug_mode
        # {member-id => Member}: All members we know
        self.known_members = {}
        if me is not None:
            self.known_members[me.id] = me
        # {event-hash => event}: Dictionary mapping hashes to events
        self.lookup_table = {}
        # {event-hash}: Events for which the final order has not yet been determined
        self.unordered_events = set()
        # [event-hash]: Final order of events
        self.ordered_events = []
        self.next_ordered_event_idx_to_process = 0
        self.idx = {}
        # {round-num}: rounds where fame is fully decided
        self.rounds_with_decided_fame = set()
        # {round-num => {member-pk => event-hash}}:
        self.witnesses = defaultdict(dict)
        # {event-hash => set(event-hash)}: Cache for event's self-children (used for fast fork check)
        self.self_children_cache = defaultdict(set)
        # set(member-id): A set of members who forked. Members who forked have no visible events.
        self.fork_blacklist = set()
    @property
    def total_stake(self) -> int:
        """
        :return: The total stake in the hashgraph
        """
        return sum([member.stake for _, member in self.known_members.items()])
    @property
    def supermajority_stake(self) -> int:
        """
        :return: The stake needed for a supermajority (2/3 of total)
        """
        return int(math.floor(2 * self.total_stake / 3))
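    # Worked example (illustrative): with total_stake == 10, supermajority_stake
    # is floor(20 / 3) == 6, i.e. the largest whole stake that does not yet
    # exceed two thirds of the total.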
    def get_unknown_events_of(self, member: Member) -> Dict[str, Event]:
        """
        Returns the presumably unknown events of a given member, in the same format as lookup_table
        :param member: The member for which to return unknown events
        :return: Dictionary mapping hashes to events
        """
        result = dict(self.lookup_table)
        head = member.head
        if head is None:
            return result
        to_visit = {head}
        visited = set()
        while len(to_visit) > 0:
            event_id = to_visit.pop()
            if event_id not in visited:
                event = result[event_id]
                del result[event_id]
                if event.parents.self_parent is not None:
                    to_visit.add(event.parents.self_parent)
                if event.parents.other_parent is not None:
                    to_visit.add(event.parents.other_parent)
                visited.add(event_id)
        return result
    def add_own_event(self, event: Event, calculate_consensus: bool = False):
        """
        Adds an own event to the hashgraph
        :param event: The event to be added
        :param calculate_consensus: Whether the consensus should be calculated immediately
        :return: None
        """
        # Sign event body
        event.sign(self.me.signing_key)
        # Add event
        self.add_event(event)
        # Optionally calculate the consensus immediately (e.g. for an initial event)
        if calculate_consensus:
            divide_rounds(self, [event])
            decide_fame(self)
            find_order(self)
            self.process_ordered_events()
    def add_event(self, event: Event):
        # Set the event's correct height
        if event.parents.self_parent:
            event.height = self.lookup_table[event.parents.self_parent].height + 1
        # Add event to graph
        self.lookup_table[event.id] = event
        # Update caches
        self.unordered_events.add(event.id)
        if self.known_members[event.verify_key].head is None or \
                event.height > self.lookup_table[self.known_members[event.verify_key].head].height:
            self.known_members[event.verify_key].head = event.id
        if event.parents.self_parent is not None:
            self.self_children_cache[event.parents.self_parent].add(event.id)
            if len(self.self_children_cache[event.parents.self_parent]) > 1:
                # We just added a fork
                bptc.logger.warn("A fork was created! Blacklisting member and clearing visibility caches.")
                # Blacklist the member who forked
                self.fork_blacklist.add(event.verify_key)
                # Visibility for events could have changed - throw away the caches
                for e in self.lookup_table.values():
                    e.can_see_cache.clear()
    def process_events(self, from_member: Member, events: Dict[str, Event]) -> None:
        """
        Processes a list of events
        :param from_member: The member from whom the events were received
        :param events: The events to be processed
        :return: None
        """
        events = copy.deepcopy(events)
        bptc.logger.debug("Processing {} events from {}...".format(len(events), from_member.verify_key[:6]))
        # Only deal with valid events
        events = filter_valid_events(events)
        events_toposorted = toposort(events)
        # Learn about other members
        self.learn_members_from_events(events)
        # Add all new events in topological order and check parent pointer
        new_events = {}
        for event in events_toposorted:
            if event.id not in self.lookup_table:
                if event.parents.self_parent is not None and event.parents.self_parent not in self.lookup_table:
                    bptc.logger.error('Self parent {} of {} not known. Ignoring all data.'.
                                      format(event.parents.self_parent[:6], event.id[:6]))
                    return
                if event.parents.other_parent is not None and event.parents.other_parent not in self.lookup_table:
                    bptc.logger.error('Other parent {} of {} not known. Ignoring all data.'.
                                      format(event.parents.other_parent[:6], event.id[:6]))
                    return
                new_events[event.id] = event
                self.add_event(event)
        # Create a new event for the gossip
        event = Event(self.me.verify_key, None, Parents(self.me.head, from_member.head))
        self.add_own_event(event)
        new_events[event.id] = event
        # Figure out fame, order, etc.
        divide_rounds(self, toposort(new_events))
        decide_fame(self)
        find_order(self)
        self.process_ordered_events()
        # Debug mode writes the DB to a file every 100 events.
        if self.debug_mode:
            number_events = (len(self.lookup_table) // 100) * 100
            # Don't store when there are not enough events or it would overwrite
            # the last temporary db
            if number_events > 0 and number_events > self.debug_mode:
                bptc.logger.debug('Store intermediate results containing about {} events'.format(number_events))
                from bptc.data.db import DB
                DB.save(self, temp=True)
                self.debug_mode = (len(self.lookup_table) // 100) * 100
    def learn_members_from_events(self, events: Dict[str, Event]) -> None:
        """
        Goes through a list of events and learns their creators if they are not already known
        :param events: The list of events
        :return: None
        """
        for event in events.values():
            if event.verify_key not in self.known_members:
                self.known_members[event.verify_key] = Member(event.verify_key, None)
    def process_ordered_events(self):
        for event_id in self.ordered_events[self.next_ordered_event_idx_to_process:len(self.ordered_events)]:
            event = self.lookup_table[event_id]
            if event.data is None:
                continue
            for transaction in event.data:
                sender = self.known_members[event.verify_key]
                if isinstance(transaction, MoneyTransaction):
                    receiver = self.known_members[transaction.receiver]
                    # Check if the sender has the funds
                    if sender.account_balance < transaction.amount or transaction.amount < 0:
                        transaction.status = TransactionStatus.DENIED
                    else:
                        sender.account_balance -= transaction.amount
                        receiver.account_balance += transaction.amount
                        transaction.status = TransactionStatus.CONFIRMED
                elif isinstance(transaction, PublishNameTransaction):
                    sender.name = transaction.name
        self.next_ordered_event_idx_to_process = len(self.ordered_events)
    def parse_transaction(self, event, transaction, plain=False):
        receiver = self.known_members[transaction.receiver].formatted_name if \
            transaction.receiver in self.known_members else transaction.receiver
        sender = self.known_members[event.verify_key].formatted_name if \
            event.verify_key in self.known_members else event.verify_key
        status = TransactionStatus.text_for_value(transaction.status)
        is_received = transaction.receiver == self.me.to_verifykey_string()
        amount = transaction.amount
        comment = transaction.comment
        time = event.time
        rec = {
            'receiver': receiver,
            'sender': sender,
            'amount': amount,
            'comment': comment,
            'time': time,
            'status': status,
            'is_received': is_received,
        }
        format_string = '{} [b]{} BPTC[/b] {} [b]{}[/b] ({}){}'
        if plain:
            format_string = '{} {} BPTC {} {} ({}){}'
        rec['formatted'] = format_string.format(
            'Received' if is_received else 'Sent',
            amount,
            'from' if rec['is_received'] else 'to',
            sender if rec['is_received'] else receiver,
            status,
            '\n"{}"'.format(comment) if comment else '',
        ).replace('\n', ' - ' if plain else '\n')
        return rec
    def get_relevant_transactions(self, plain=False, show_all=False):
        # Load transactions belonging to this member
        transactions = []
        events = list(self.lookup_table.values())
        for e in events:
            for t in e.data or []:
                if isinstance(t, MoneyTransaction):
                    if show_all or self.me.to_verifykey_string() in [e.verify_key, t.receiver]:
                        transactions.append(self.parse_transaction(e, t, plain))
        return sorted(transactions, key=lambda x: x['time'], reverse=True)
def filter_valid_events(events: Dict[str, Event]) -> Dict[str, Event]:
    """
    Goes through a dict of events and returns a dict containing only the valid ones
    :param events: The dict to be filtered
    :return: A dict containing only valid events
    """
    result = dict()
    for event_id, event in events.items():
        if event.has_valid_signature:
            result[event_id] = event
        else:
            bptc.logger.warn("Event had invalid signature: {}".format(event))
    return result
def init_hashgraph(app):
    """Loads the hashgraph from file or creates a new one, if the file doesn't exist."""
    from bptc.data.db import DB
    from bptc.data.network import Network
    # Try to load the Hashgraph from the database
    hashgraph = DB.load_hashgraph(os.path.join(app.cl_args.output, 'data.db'))
    # Create a new hashgraph if it could not be loaded
    if hashgraph is None or hashgraph.me is None:
        me = Member.create()
        me.address = IPv4Address("TCP", bptc.ip, bptc.port)
        hashgraph = Hashgraph(me, app.cl_args.debug)
        app.network = Network(hashgraph, create_initial_event=True)
    else:
        # Only touch the loaded hashgraph's attributes once we know it exists.
        hashgraph.debug_mode = app.cl_args.debug
        app.network = Network(hashgraph, create_initial_event=False)
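

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, outside the normal app flow):
    # create a member, build a hashgraph around it and inspect the stake
    # bookkeeping. Assumes Member.create() assigns a default stake, as in
    # init_hashgraph() above.
    me = Member.create()
    hg = Hashgraph(me)
    print('Known members:', list(hg.known_members.keys()))
    print('Total stake:', hg.total_stake,
          '- supermajority threshold:', hg.supermajority_stake)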
 | |
| 
	# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib  # type: ignore
from google.api_core import exceptions as core_exceptions  # type: ignore
from google.api_core import gapic_v1  # type: ignore
from google.api_core import retry as retries  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport import mtls  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth.exceptions import MutualTLSChannelError  # type: ignore
from google.oauth2 import service_account  # type: ignore
from google.ads.googleads.v8.resources.types import campaign_simulation
from google.ads.googleads.v8.services.types import campaign_simulation_service
from .transports.base import (
    CampaignSimulationServiceTransport,
    DEFAULT_CLIENT_INFO,
)
from .transports.grpc import CampaignSimulationServiceGrpcTransport
class CampaignSimulationServiceClientMeta(type):
    """Metaclass for the CampaignSimulationService client.
    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[CampaignSimulationServiceTransport]]
    _transport_registry["grpc"] = CampaignSimulationServiceGrpcTransport
    def get_transport_class(
        cls, label: str = None,
    ) -> Type[CampaignSimulationServiceTransport]:
        """Return an appropriate transport class.
        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.
        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]
        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class CampaignSimulationServiceClient(
    metaclass=CampaignSimulationServiceClientMeta
):
    """Service to fetch campaign  simulations."""
    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Convert api endpoint to mTLS endpoint.
        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )
        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        if mtls or not googledomain:
            return api_endpoint
        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )
        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
    DEFAULT_ENDPOINT = "googleads.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials info.
        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            CampaignSimulationServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(
            info
        )
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.
        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            CampaignSimulationServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename
        )
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    from_service_account_json = from_service_account_file
    @property
    def transport(self) -> CampaignSimulationServiceTransport:
        """Return the transport used by the client instance.
        Returns:
            CampaignSimulationServiceTransport: The transport used by the client instance.
        """
        return self._transport
    @staticmethod
    def campaign_simulation_path(
        customer_id: str,
        campaign_id: str,
        type: str,
        modification_method: str,
        start_date: str,
        end_date: str,
    ) -> str:
        """Return a fully-qualified campaign_simulation string."""
        return "customers/{customer_id}/campaignSimulations/{campaign_id}~{type}~{modification_method}~{start_date}~{end_date}".format(
            customer_id=customer_id,
            campaign_id=campaign_id,
            type=type,
            modification_method=modification_method,
            start_date=start_date,
            end_date=end_date,
        )
    @staticmethod
    def parse_campaign_simulation_path(path: str) -> Dict[str, str]:
        """Parse a campaign_simulation path into its component segments."""
        m = re.match(
            r"^customers/(?P<customer_id>.+?)/campaignSimulations/(?P<campaign_id>.+?)~(?P<type>.+?)~(?P<modification_method>.+?)~(?P<start_date>.+?)~(?P<end_date>.+?)$",
            path,
        )
        return m.groupdict() if m else {}
    @staticmethod
    def common_billing_account_path(billing_account: str,) -> str:
        """Return a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(
            billing_account=billing_account,
        )
    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_folder_path(folder: str,) -> str:
        """Return a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder,)
    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_organization_path(organization: str,) -> str:
        """Return a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization,)
    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_project_path(project: str,) -> str:
        """Return a fully-qualified project string."""
        return "projects/{project}".format(project=project,)
    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}
    @staticmethod
    def common_location_path(project: str, location: str,) -> str:
        """Return a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(
            project=project, location=location,
        )
    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(
            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
        )
        return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, CampaignSimulationServiceTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the campaign simulation service client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.CampaignSimulationServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Create SSL credentials for mutual TLS if needed.
        use_client_cert = bool(
            util.strtobool(
                os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
            )
        )
        ssl_credentials = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                import grpc  # type: ignore
                cert, key = client_options.client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
                is_mtls = True
            else:
                creds = SslCredentials()
                is_mtls = creds.is_mtls
                ssl_credentials = creds.ssl_credentials if is_mtls else None
        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT
                    if is_mtls
                    else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, CampaignSimulationServiceTransport):
            # transport is a CampaignSimulationServiceTransport instance.
            if credentials:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            self._transport = transport
        elif isinstance(transport, str):
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials, host=self.DEFAULT_ENDPOINT
            )
        else:
            self._transport = CampaignSimulationServiceGrpcTransport(
                credentials=credentials,
                host=api_endpoint,
                ssl_channel_credentials=ssl_credentials,
                client_info=client_info,
            )
    def get_campaign_simulation(
        self,
        request: campaign_simulation_service.GetCampaignSimulationRequest = None,
        *,
        resource_name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> campaign_simulation.CampaignSimulation:
        r"""Returns the requested campaign simulation in full
        detail.
        Args:
            request (:class:`google.ads.googleads.v8.services.types.GetCampaignSimulationRequest`):
                The request object. Request message for
                [CampaignSimulationService.GetCampaignSimulation][google.ads.googleads.v8.services.CampaignSimulationService.GetCampaignSimulation].
            resource_name (:class:`str`):
                Required. The resource name of the
                campaign simulation to fetch.
                This corresponds to the ``resource_name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.ads.googleads.v8.resources.types.CampaignSimulation:
                A campaign simulation. Supported combinations of advertising
                   channel type, simulation type and simulation modification
                   method are listed below:

                   SEARCH - CPC_BID - UNIFORM
                   SEARCH - CPC_BID - SCALING
                   SEARCH - TARGET_CPA - UNIFORM
                   SEARCH - TARGET_CPA - SCALING
                   SEARCH - TARGET_ROAS - UNIFORM
                   SEARCH - TARGET_IMPRESSION_SHARE - UNIFORM
                   SEARCH - BUDGET - UNIFORM
                   SHOPPING - BUDGET - UNIFORM
                   SHOPPING - TARGET_ROAS - UNIFORM
                   MULTIPLE - TARGET_CPA - UNIFORM
                   OWNED_AND_OPERATED - TARGET_CPA - DEFAULT
                   DISPLAY - TARGET_CPA - UNIFORM
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        if request is not None and any([resource_name]):
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a campaign_simulation_service.GetCampaignSimulationRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(
            request, campaign_simulation_service.GetCampaignSimulationRequest
        ):
            request = campaign_simulation_service.GetCampaignSimulationRequest(
                request
            )
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if resource_name is not None:
                request.resource_name = resource_name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.get_campaign_simulation
        ]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("resource_name", request.resource_name),)
            ),
        )
        # Send the request.
        response = rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,
        )
        # Done; return the response.
        return response
__all__ = ("CampaignSimulationServiceClient",)
 | |
| 
	# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
#                                                                              #
# Copyright 2012 Vincent Jacques <[email protected]>                 #
# Copyright 2012 Zearin <[email protected]>                                      #
# Copyright 2013 AKFish <[email protected]>                                     #
# Copyright 2013 Vincent Jacques <[email protected]>                 #
# Copyright 2013 martinqt <[email protected]>                                  #
# Copyright 2014 Andy Casey <[email protected]>                            #
# Copyright 2014 Vincent Jacques <[email protected]>                 #
# Copyright 2016 Jannis Gebauer <[email protected]>                                #
# Copyright 2016 John Eskew <[email protected]>                                   #
# Copyright 2016 Peter Buckley <[email protected]>          #
# Copyright 2018 sfdye <[email protected]>                                      #
#                                                                              #
# This file is part of PyGithub.                                               #
# http://pygithub.readthedocs.io/                                              #
#                                                                              #
# PyGithub is free software: you can redistribute it and/or modify it under    #
# the terms of the GNU Lesser General Public License as published by the Free  #
# Software Foundation, either version 3 of the License, or (at your option)    #
# any later version.                                                           #
#                                                                              #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY  #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS    #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details.                                                                     #
#                                                                              #
# You should have received a copy of the GNU Lesser General Public License     #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.             #
#                                                                              #
################################################################################
import github.GithubObject
import github.PaginatedList
import github.GitCommit
import github.NamedUser
import github.CommitStatus
import github.CommitCombinedStatus
import github.File
import github.CommitStats
import github.CommitComment
class Commit(github.GithubObject.CompletableGithubObject):
    """
    This class represents Commits. The reference can be found here http://developer.github.com/v3/git/commits/
    """
    def __repr__(self):
        return self.get__repr__({"sha": self._sha.value})
    @property
    def author(self):
        """
        :type: :class:`github.NamedUser.NamedUser`
        """
        self._completeIfNotSet(self._author)
        return self._author.value
    @property
    def comments_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._comments_url)
        return self._comments_url.value
    @property
    def commit(self):
        """
        :type: :class:`github.GitCommit.GitCommit`
        """
        self._completeIfNotSet(self._commit)
        return self._commit.value
    @property
    def committer(self):
        """
        :type: :class:`github.NamedUser.NamedUser`
        """
        self._completeIfNotSet(self._committer)
        return self._committer.value
    @property
    def files(self):
        """
        :type: list of :class:`github.File.File`
        """
        self._completeIfNotSet(self._files)
        return self._files.value
    @property
    def html_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._html_url)
        return self._html_url.value
    @property
    def parents(self):
        """
        :type: list of :class:`github.Commit.Commit`
        """
        self._completeIfNotSet(self._parents)
        return self._parents.value
    @property
    def sha(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._sha)
        return self._sha.value
    @property
    def stats(self):
        """
        :type: :class:`github.CommitStats.CommitStats`
        """
        self._completeIfNotSet(self._stats)
        return self._stats.value
    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value
    def create_comment(self, body, line=github.GithubObject.NotSet, path=github.GithubObject.NotSet, position=github.GithubObject.NotSet):
        """
        :calls: `POST /repos/:owner/:repo/commits/:sha/comments <http://developer.github.com/v3/repos/comments>`_
        :param body: string
        :param line: integer
        :param path: string
        :param position: integer
        :rtype: :class:`github.CommitComment.CommitComment`
        """
        assert isinstance(body, (str, unicode)), body
        assert line is github.GithubObject.NotSet or isinstance(line, (int, long)), line
        assert path is github.GithubObject.NotSet or isinstance(path, (str, unicode)), path
        assert position is github.GithubObject.NotSet or isinstance(position, (int, long)), position
        post_parameters = {
            "body": body,
        }
        if line is not github.GithubObject.NotSet:
            post_parameters["line"] = line
        if path is not github.GithubObject.NotSet:
            post_parameters["path"] = path
        if position is not github.GithubObject.NotSet:
            post_parameters["position"] = position
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self.url + "/comments",
            input=post_parameters
        )
        return github.CommitComment.CommitComment(self._requester, headers, data, completed=True)
    def create_status(self, state, target_url=github.GithubObject.NotSet, description=github.GithubObject.NotSet, context=github.GithubObject.NotSet):
        """
        :calls: `POST /repos/:owner/:repo/statuses/:sha <http://developer.github.com/v3/repos/statuses>`_
        :param state: string
        :param target_url: string
        :param description: string
        :param context: string
        :rtype: :class:`github.CommitStatus.CommitStatus`
        """
        assert isinstance(state, (str, unicode)), state
        assert target_url is github.GithubObject.NotSet or isinstance(target_url, (str, unicode)), target_url
        assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
        assert context is github.GithubObject.NotSet or isinstance(context, (str, unicode)), context
        post_parameters = {
            "state": state,
        }
        if target_url is not github.GithubObject.NotSet:
            post_parameters["target_url"] = target_url
        if description is not github.GithubObject.NotSet:
            post_parameters["description"] = description
        if context is not github.GithubObject.NotSet:
            post_parameters["context"] = context
        headers, data = self._requester.requestJsonAndCheck(
            "POST",
            self._parentUrl(self._parentUrl(self.url)) + "/statuses/" + self.sha,
            input=post_parameters
        )
        return github.CommitStatus.CommitStatus(self._requester, headers, data, completed=True)
    def get_comments(self):
        """
        :calls: `GET /repos/:owner/:repo/commits/:sha/comments <http://developer.github.com/v3/repos/comments>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitComment.CommitComment`
        """
        return github.PaginatedList.PaginatedList(
            github.CommitComment.CommitComment,
            self._requester,
            self.url + "/comments",
            None
        )
    def get_statuses(self):
        """
        :calls: `GET /repos/:owner/:repo/statuses/:ref <http://developer.github.com/v3/repos/statuses>`_
        :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.CommitStatus.CommitStatus`
        """
        return github.PaginatedList.PaginatedList(
            github.CommitStatus.CommitStatus,
            self._requester,
            self._parentUrl(self._parentUrl(self.url)) + "/statuses/" + self.sha,
            None
        )
    def get_combined_status(self):
        """
        :calls: `GET /repos/:owner/:repo/commits/:ref/status <http://developer.github.com/v3/repos/statuses>`_
        :rtype: :class:`github.CommitCombinedStatus.CommitCombinedStatus`
        """
        headers, data = self._requester.requestJsonAndCheck(
            "GET",
            self.url + "/status"
        )
        return github.CommitCombinedStatus.CommitCombinedStatus(self._requester, headers, data, completed=True)
    @property
    def _identity(self):
        return self.sha
    def _initAttributes(self):
        self._author = github.GithubObject.NotSet
        self._comments_url = github.GithubObject.NotSet
        self._commit = github.GithubObject.NotSet
        self._committer = github.GithubObject.NotSet
        self._files = github.GithubObject.NotSet
        self._html_url = github.GithubObject.NotSet
        self._parents = github.GithubObject.NotSet
        self._sha = github.GithubObject.NotSet
        self._stats = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet
    def _useAttributes(self, attributes):
        if "author" in attributes:  # pragma no branch
            self._author = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["author"])
        if "comments_url" in attributes:  # pragma no branch
            self._comments_url = self._makeStringAttribute(attributes["comments_url"])
        if "commit" in attributes:  # pragma no branch
            self._commit = self._makeClassAttribute(github.GitCommit.GitCommit, attributes["commit"])
        if "committer" in attributes:  # pragma no branch
            self._committer = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["committer"])
        if "files" in attributes:  # pragma no branch
            self._files = self._makeListOfClassesAttribute(github.File.File, attributes["files"])
        if "html_url" in attributes:  # pragma no branch
            self._html_url = self._makeStringAttribute(attributes["html_url"])
        if "parents" in attributes:  # pragma no branch
            self._parents = self._makeListOfClassesAttribute(Commit, attributes["parents"])
        if "sha" in attributes:  # pragma no branch
            self._sha = self._makeStringAttribute(attributes["sha"])
        if "stats" in attributes:  # pragma no branch
            self._stats = self._makeClassAttribute(github.CommitStats.CommitStats, attributes["stats"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
 | |
| 
	# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Parser for MOPAC output files"""
# Based on parser in RMG-Py by Greg Magoon
# https://github.com/ReactionMechanismGenerator/RMG-Py/blob/master/external/cclib/parser/mopacparser.py
# Also parts from Ben Albrecht
# https://github.com/ben-albrecht/cclib/blob/master/cclib/parser/mopacparser.py
# Merged and modernized by Geoff Hutchison
import re
import math
import numpy
from cclib.parser import data
from cclib.parser import logfileparser
from cclib.parser import utils
def symbol2int(symbol):
    t = utils.PeriodicTable()
    return t.number[symbol]
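# e.g. symbol2int("C") -> 6, symbol2int("O") -> 8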
class MOPAC(logfileparser.Logfile):
    """A MOPAC20XX output file."""
    def __init__(self, *args, **kwargs):
        super().__init__(logname="MOPAC", *args, **kwargs)
    def __str__(self):
        """Return a string representation of the object."""
        return "MOPAC log file %s" % (self.filename)
    def __repr__(self):
        """Return a representation of the object."""
        return 'MOPAC("%s")' % (self.filename)
    def normalisesym(self, label):
        """MOPAC does not require normalizing symmetry labels."""
        return label
    def before_parsing(self):
        #TODO
        # Defaults
        charge = 0
        self.set_attribute('charge', charge)
        mult = 1
        self.set_attribute('mult', mult)
        # Keep track of whether or not we're performing an
        # (un)restricted calculation.
        self.unrestricted = False
        self.is_rohf = False
        # Keep track of 1SCF vs. gopt since gopt is default
        self.onescf = False
        self.geomdone = False
        # Compile the dashes-and-or-spaces-only regex.
        self.re_dashes_and_spaces = re.compile(r'^[\s-]+$')
        self.star = ' * '
        self.stars = ' *******************************************************************************'
        self.spinstate = {'SINGLET': 1,
                          'DOUBLET': 2,
                          'TRIPLET': 3,
                          'QUARTET': 4,
                          'QUINTET': 5,
                          'SEXTET': 6,
                          'HEPTET': 7,
                          'OCTET': 8,
                          'NONET': 9}
    def extract(self, inputfile, line):
        """Extract information from the file object inputfile."""
        # Extract the package version.
        if "For non-commercial use only" in line:
            # Ignore the platform information for now (the last character).
            self.metadata["package_version"] = line.split()[8][:-1]
            # Use the year as the legacy (short) package version.
            self.skip_lines(
                inputfile, ["Stewart Computational Chemistry", "s", "s", "s", "s"]
            )
            self.metadata["legacy_package_version"] = next(inputfile).split()[1][5:]
        # Extract the atomic numbers and coordinates from the optimized geometry
        # note that the cartesian coordinates section occurs multiple times in the file, and we want to end up using the last instance
        # also, note that the section labeled cartesian coordinates doesn't have as many decimal places as the one used here
        # Example 1 (not used):
        #          CARTESIAN COORDINATES
        #
        #    NO.       ATOM               X         Y         Z
        #
        #     1         O                  4.7928   -0.8461    0.3641
        #     2         O                  5.8977   -0.3171    0.0092
        # ...
        # Example 2 (used):
        #   ATOM   CHEMICAL          X               Y               Z
        #  NUMBER    SYMBOL      (ANGSTROMS)     (ANGSTROMS)     (ANGSTROMS)
        #
        #     1       O          4.79280259  *  -0.84610232  *   0.36409474  *
        #     2       O          5.89768035  *  -0.31706418  *   0.00917035  *
        # ... etc.
        if line.split() == ["NUMBER", "SYMBOL", "(ANGSTROMS)", "(ANGSTROMS)", "(ANGSTROMS)"]:
            self.updateprogress(inputfile, "Attributes", self.cupdate)
            self.inputcoords = []
            self.inputatoms = []
            blankline = next(inputfile)
            atomcoords = []
            line = next(inputfile)
            while len(line.split()) > 6:
                # MOPAC Version 14.019L 64BITS sometimes runs this block straight
                # into the "CARTESIAN COORDINATES" block with no blank line in between.
                tokens = line.split()
                self.inputatoms.append(symbol2int(tokens[1]))
                xc = float(tokens[2])
                yc = float(tokens[4])
                zc = float(tokens[6])
                atomcoords.append([xc, yc, zc])
                line = next(inputfile)
            self.inputcoords.append(atomcoords)
            if not hasattr(self, "natom"):
                self.atomnos = numpy.array(self.inputatoms, 'i')
                self.natom = len(self.atomnos)
        if 'CHARGE ON SYSTEM =' in line:
            charge = int(line.split()[5])
            self.set_attribute('charge', charge)
        if 'SPIN STATE DEFINED' in line:
            # find the multiplicity from the line token (SINGLET, DOUBLET, TRIPLET, etc)
            mult = self.spinstate[line.split()[1]]
            self.set_attribute('mult', mult)
        # Read energy (in kcal/mol, converted to eV)
        #
        # FINAL HEAT OF FORMATION =       -333.88606 KCAL =   -1396.97927 KJ
        if 'FINAL HEAT OF FORMATION =' in line:
            if not hasattr(self, "scfenergies"):
                self.scfenergies = []
            self.scfenergies.append(utils.convertor(utils.float(line.split()[5]), "kcal/mol", "eV"))
        # Molecular mass parsing (units will be amu)
        #
        # MOLECULAR WEIGHT        ==        130.1890
        if line[0:35] == '          MOLECULAR WEIGHT        =':
            self.molmass = utils.float(line.split()[3])
        #rotational constants
        #Example:
        #          ROTATIONAL CONSTANTS IN CM(-1)
        #
        #          A =    0.01757641   B =    0.00739763   C =    0.00712013
        # could also read in the moment of inertia, but this should just differ by a constant: rot cons = h/(8*Pi^2*I)
        # note that the last occurrence of this in the thermochemistry section has reduced precision,
        # so we will want to use the 2nd-to-last instance
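        # (for constants reported in cm^-1 the relation involves the speed of
        #  light: B~ = h / (8*Pi^2*c*I), i.e. I = h / (8*Pi^2*c*B~))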
        if line[0:40] == '          ROTATIONAL CONSTANTS IN CM(-1)':
            blankline = next(inputfile)
            rotinfo = next(inputfile)
            if not hasattr(self, "rotcons"):
                self.rotcons = []
            broken = rotinfo.split()
            # leave the rotational constants as read (cm^-1)
            a = float(broken[2])
            b = float(broken[5])
            c = float(broken[8])
            self.rotcons.append([a, b, c])
        # Start of the IR/Raman frequency section.
        # Example:
        # VIBRATION    1    1A       ATOM PAIR        ENERGY CONTRIBUTION    RADIAL
        # FREQ.        15.08        C 12 --  C 16           +7.9% (999.0%)     0.0%
        # T-DIPOLE    0.2028        C 16 --  H 34           +5.8% (999.0%)    28.0%
        # TRAVEL      0.0240        C 16 --  H 32           +5.6% (999.0%)    35.0%
        # RED. MASS   1.7712        O  1 --  O  4           +5.2% (999.0%)     0.4%
        # EFF. MASS7752.8338
        #
        # VIBRATION    2    2A       ATOM PAIR        ENERGY CONTRIBUTION    RADIAL
        # FREQ.        42.22        C 11 --  C 15           +9.0% (985.8%)     0.0%
        # T-DIPOLE    0.1675        C 15 --  H 31           +6.6% (843.6%)     3.3%
        # TRAVEL      0.0359        C 15 --  H 29           +6.0% (802.8%)    24.5%
        # RED. MASS   1.7417        C 13 --  C 17           +5.8% (792.7%)     0.0%
        # EFF. MASS1242.2114
        if line[1:10] == 'VIBRATION':
            self.updateprogress(inputfile, "Frequency Information", self.fupdate)
            # get the vib symmetry
            if len(line.split()) >= 3:
                sym = line.split()[2]
                if not hasattr(self, 'vibsyms'):
                    self.vibsyms = []
                self.vibsyms.append(sym)
            line = next(inputfile)
            if 'FREQ' in line:
                if not hasattr(self, 'vibfreqs'):
                    self.vibfreqs = []
                freq = float(line.split()[1])
                self.vibfreqs.append(freq)
            line = next(inputfile)
            if 'T-DIPOLE' in line:
                if not hasattr(self, 'vibirs'):
                    self.vibirs = []
                tdipole = float(line.split()[1])
                # transform to km/mol
                self.vibirs.append(math.sqrt(tdipole))
            line = next(inputfile)
            if 'TRAVEL' in line:
                pass
            line = next(inputfile)
            if 'RED. MASS' in line:
                if not hasattr(self, 'vibrmasses'):
                    self.vibrmasses = []
                rmass = float(line.split()[2])
                self.vibrmasses.append(rmass)
        # Orbital eigenvalues, e.g.
        #           ALPHA EIGENVALUES
        #            BETA EIGENVALUES
        # or just "EIGENVALUES" for closed-shell
        if 'EIGENVALUES' in line:
            if not hasattr(self, 'moenergies'):
                self.moenergies = [] # list of arrays
            energies = []
            line = next(inputfile)
            while len(line.split()) > 0:
                energies.extend([float(i) for i in line.split()])
                line = next(inputfile)
            self.moenergies.append(energies)
        # todo:
        # Partial charges and dipole moments
        # Example:
        # NET ATOMIC CHARGES
        if line[:16] == '== MOPAC DONE ==':
            self.metadata['success'] = True
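# Illustrative usage sketch (file name is a placeholder; assumes cclib is
# installed so its high-level reader can dispatch to this parser):
#
#     from cclib.io import ccread
#     data = ccread("mopac_calc.out")
#     print(data.natom, data.atomnos)   # parsed from the coordinate block above
#     print(data.scfenergies)           # heats of formation converted to eV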
from __future__ import print_function, unicode_literals
import re
from decimal import Decimal as D
from aspen import Response
import pytest
from gratipay.security.user import SESSION
from gratipay.testing import Harness
from gratipay.wireup import find_files
overescaping_re = re.compile(r'&(#[0-9]{4}|[a-z]+);')
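# Matches HTML entity references (e.g. "&amp;" or a four-digit "&#....;") left
# literally in a rendered body; that is the symptom of escaping already-escaped text.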
class TestPages(Harness):
    def browse(self, setup=None, **kw):
        alice = self.make_participant('alice', claimed_time='now', number='plural')
        exchange_id = self.make_exchange('balanced-cc', 19, 0, alice)
        alice.insert_into_communities(True, 'Wonderland', 'wonderland')
        alan = self.make_participant('alan', claimed_time='now')
        alice.add_member(alan)
        if setup:
            setup(alice)
        i = len(self.client.www_root)
        urls = []
        for spt in find_files(self.client.www_root, '*.spt'):
            url = spt[i:-4].replace('/%team/', '/alice/') \
                           .replace('/alice/%sub', '/alice/foo') \
                           .replace('/~/%username/', '/~alice/') \
                           .replace('/for/%slug/', '/for/wonderland/') \
                           .replace('/%platform/', '/github/') \
                           .replace('/%user_name/', '/gratipay/') \
                           .replace('/%membername', '/alan') \
                           .replace('/%exchange_id.int', '/%s' % exchange_id) \
                           .replace('/%redirect_to', '/giving') \
                           .replace('/%endpoint', '/public') \
                           .replace('/about/me/%sub', '/about/me')
            assert '/%' not in url
            if 'index' in url.split('/')[-1]:
                url = url.rsplit('/', 1)[0] + '/'
            urls.append(url)
        urls.extend("""
           /about/me
           /about/me/
           /about/me/history
        """.split())
        for url in urls:
            try:
                r = self.client.GET(url, **kw)
            except Response as r:
                if r.code == 404 or r.code >= 500:
                    raise
            assert r.code != 404
            assert r.code < 500
            assert not overescaping_re.search(r.body.decode('utf8'))
    def test_anon_can_browse(self):
        self.browse()
    def test_new_participant_can_browse(self):
        self.browse(auth_as='alice')
    def test_on_the_fence_can_browse(self):
        def setup(alice):
            bob = self.make_participant('bob', claimed_time='now', last_bill_result='')
            bob.set_tip_to(alice, D('1.00'))
        self.browse(setup, auth_as='alice')
    def test_escaping_on_homepage(self):
        self.make_participant('alice', claimed_time='now')
        expected = "<a href='/alice/'>"
        actual = self.client.GET('/', auth_as='alice').body
        assert expected in actual
    @pytest.mark.xfail(reason="migrating to Teams; #3399")
    def test_username_is_in_button(self):
        self.make_participant('alice', claimed_time='now')
        self.make_participant('bob', claimed_time='now')
        body = self.client.GET('/~alice/', auth_as='bob').body
        assert '<span class="zero">Give to alice</span>' in body
    @pytest.mark.xfail(reason="migrating to Teams; #3399")
    def test_username_is_in_unauth_giving_cta(self):
        self.make_participant('alice', claimed_time='now')
        body = self.client.GET('/~alice/').body
        assert 'give to alice' in body
    def test_widget(self):
        self.make_participant('cheese', claimed_time='now')
        expected = "javascript: window.open"
        actual = self.client.GET('/~cheese/widget.html').body
        assert expected in actual
    def test_github_associate(self):
        assert self.client.GxT('/on/github/associate').code == 400
    def test_twitter_associate(self):
        assert self.client.GxT('/on/twitter/associate').code == 400
    def test_about(self):
        expected = "give money every week"
        actual = self.client.GET('/about/').body
        assert expected in actual
    def test_about_stats(self):
        expected = "have joined Gratipay"
        actual = self.client.GET('/about/stats.html').body
        assert expected in actual
    def test_about_charts(self):
        assert self.client.GxT('/about/charts.html').code == 302
    def test_about_faq(self):
        expected = "What is Gratipay?"
        actual = self.client.GET('/about/faq.html').body.decode('utf8')
        assert expected in actual
    def test_about_teams_redirect(self):
        assert self.client.GxT('/about/teams/').code == 302
    def test_about_teams(self):
        expected = "Teams"
        actual = self.client.GET('/about/features/teams/').body.decode('utf8')
        assert expected in actual
    def test_404(self):
        response = self.client.GET('/about/four-oh-four.html', raise_immediately=False)
        assert "Not Found" in response.body
        assert "{%" not in response.body
    def test_for_contributors_redirects_to_inside_gratipay(self):
        loc = self.client.GxT('/for/contributors/').headers['Location']
        assert loc == 'http://inside.gratipay.com/'
    def test_mission_statement_also_redirects(self):
        assert self.client.GxT('/for/contributors/mission-statement.html').code == 302
    def test_anonymous_sign_out_redirects(self):
        response = self.client.PxST('/sign-out.html')
        assert response.code == 302
        assert response.headers['Location'] == '/'
    def test_sign_out_overwrites_session_cookie(self):
        self.make_participant('alice')
        response = self.client.PxST('/sign-out.html', auth_as='alice')
        assert response.code == 302
        assert response.headers.cookie[SESSION].value == ''
    def test_sign_out_doesnt_redirect_xhr(self):
        self.make_participant('alice')
        response = self.client.PxST('/sign-out.html', auth_as='alice',
                                    HTTP_X_REQUESTED_WITH=b'XMLHttpRequest')
        assert response.code == 200
    def test_settings_page_available_balance(self):
        self.make_participant('alice', claimed_time='now')
        self.db.run("UPDATE participants SET balance = 123.00 WHERE username = 'alice'")
        actual = self.client.GET("/~alice/settings/", auth_as="alice").body
        expected = "123"
        assert expected in actual
    def test_subscriptions_page(self):
        self.make_team(is_approved=True)
        alice = self.make_participant('alice', claimed_time='now')
        alice.set_subscription_to('TheATeam', "1.00")
        assert "The A Team" in self.client.GET("/~alice/subscriptions/", auth_as="alice").body
    def test_giving_page_shows_cancelled(self):
        self.make_team(is_approved=True)
        alice = self.make_participant('alice', claimed_time='now')
        alice.set_subscription_to('TheATeam', "1.00")
        alice.set_subscription_to('TheATeam', "0.00")
        assert "Cancelled" in self.client.GET("/~alice/subscriptions/", auth_as="alice").body
    def test_new_participant_can_edit_profile(self):
        self.make_participant('alice', claimed_time='now')
        body = self.client.GET("/~alice/", auth_as="alice").body
        assert b'Edit' in body
    def test_tilde_slash_redirects_to_tilde(self):
        self.make_participant('alice', claimed_time='now')
        response = self.client.GxT("/~/alice/", auth_as="alice")
        assert response.code == 302
        assert response.headers['Location'] == '/~alice/'
    def test_tilde_slash_redirects_subpages_with_querystring_to_tilde(self):
        self.make_participant('alice', claimed_time='now')
        response = self.client.GxT("/~/alice/foo/bar?baz=buz", auth_as="alice")
        assert response.code == 302
        assert response.headers['Location'] == '/~alice/foo/bar?baz=buz'
    def test_username_redirected_to_tilde(self):
        self.make_participant('alice', claimed_time='now')
        response = self.client.GxT("/alice/", auth_as="alice")
        assert response.code == 302
        assert response.headers['Location'] == '/~alice/'
    def test_username_redirects_everything_to_tilde(self):
        self.make_participant('alice', claimed_time='now')
        response = self.client.GxT("/alice/foo/bar?baz=buz", auth_as="alice")
        assert response.code == 302
        assert response.headers['Location'] == '/~alice/foo/bar?baz=buz'
    def test_team_slug__not__redirected_from_tilde(self):
        self.make_team(is_approved=True)
        assert self.client.GET("/TheATeam/").code == 200
        assert self.client.GxT("/~TheATeam/").code == 404
from django.contrib import admin
from django.contrib.admin import SimpleListFilter
from django.db import models
from django.db.models.fields import CharField, TextField
from django.forms import Textarea, ModelForm
from import_export.admin import ImportExportModelAdmin
from solo.admin import SingletonModelAdmin
from .models import Audiologist
from .models import AudiologistResource
from .models import Client
from .models import ClientResource
from .models import MeetingLog
from .models import MeetingLogResource
from .models import Provider
from .models import ProviderResource
from .models import IncomeSource
from .models import Settings
from .models import Grantor
from .models import GrantorResource
standard_textarea = Textarea(attrs={'rows': 3,
                                    'cols': 40,
                                    'style': 'height: 3.6em;'})
class DeleteNotAllowedModelAdmin(admin.ModelAdmin):
    def has_delete_permission(self, request, obj=None):
        return request.user.is_superuser
    
    
class AudiologistCurrentFilter(SimpleListFilter):
    '''
    Custom filter whose default (no query parameter) is current == True,
    so only current audiologists are listed until another choice is made.
    '''
    title = 'Status'
    parameter_name = 'current'
    def lookups(self, request, model_admin):
        return (
            ('a', 'All audiologists'),
            ('y', 'Current'),
            ('n', 'Inactive'),
        )
    def queryset(self, request, queryset):
        if self.value() == 'a':
            return queryset.filter()
        url_val_map = {
            'y': True,
            'n': False,
            None: True,
        }
        val = url_val_map[self.value()]
        return queryset.filter(current=val)
    
    def choices(self, cl, *a, **kw):
        yield {
            'selected': self.value() is None or self.value() == 'y',
            'query_string': cl.get_query_string({}, [self.parameter_name]),
            'display': 'Current',
        }
        yield {
            'selected': self.value() == 'n',
            'query_string': cl.get_query_string({self.parameter_name: 'n'}, []),
            'display': 'Inactive',
        }
        yield {
            'selected': self.value() == 'a',
            'query_string': cl.get_query_string({self.parameter_name: 'a'}, []),
            'display': 'All',
        }
class AudiologistAdmin(DeleteNotAllowedModelAdmin, ImportExportModelAdmin):
    list_display = ('name', 'allowed', 'current')
    list_filter = (AudiologistCurrentFilter,)
    ordering = ('name',)
    resource_class = AudiologistResource
    formfield_overrides = {
        models.TextField: {
            'widget': standard_textarea,
        },
    }
class ClientIncomeInlineAdmin(admin.TabularInline):
    model = IncomeSource
    can_delete = True
    extra = 1
class MeetingLogInlineAdminForm(ModelForm):
    class Meta:
        model = MeetingLog
        fields = '__all__'
        widgets = {
            'results': standard_textarea,
        }
class MeetingLogInlineAdmin(admin.TabularInline):
    model = MeetingLog
    form = MeetingLogInlineAdminForm
    can_delete = True
    extra = 1
class DateYesNoFilter(SimpleListFilter):
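    '''
    Base filter for "is this date set?" style questions: subclasses supply
    title, parameter_name and field_name (see the filter classes below).
    '''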
    def lookups(self, request, model_admin):
        return (
            ('y', 'Yes'),
            ('n', 'No'),
        )
    def queryset(self, request, queryset):
        query = {}
        if self.value() == 'y':
            query = {self.field_name + '__isnull': False}
        elif self.value() == 'n':
            query = {self.field_name + '__isnull': True}
        return queryset.filter(**query)
class DeceasedFilter(DateYesNoFilter):
    title = 'Deceased'
    parameter_name = 'deceased'
    field_name = 'date_of_death'
class CostShareApprovedFilter(DateYesNoFilter):
    title = 'Cost Share Approved'
    parameter_name = 'cost_share_approved'
    field_name = 'cost_share_approval'
class UpdateMeetingFilter(DateYesNoFilter):
    title = 'Had Update Meeting'
    parameter_name = 'update_meeting'
    field_name = 'update_meeting'
class ProviderAuthReqFilter(DateYesNoFilter):
    title = 'Provider Auth Requested'
    parameter_name = 'provider_auth_requested'
    field_name = 'provider_auth_requested'
class ProviderAuthRecvFilter(DateYesNoFilter):
    title = 'Provider Auth Rcvd'
    parameter_name = 'provider_auth_received'
    field_name = 'provider_auth_received'
class AudiologistReferredFilter(DateYesNoFilter):
    title = 'Audiologist Referred'
    parameter_name = 'audiologist_referral_date'
    field_name = 'audiologist_referral_date'
class AudiologistApptFilter(DateYesNoFilter):
    title = 'Audiologist Appt Set'
    parameter_name = 'audiologist_appointment_date'
    field_name = 'audiologist_appointment_date'
class AudiologistInvoicedFilter(DateYesNoFilter):
    title = 'Audiologist Invoiced'
    parameter_name = 'audiologist_invoiced_date'
    field_name = 'audiologist_invoiced_date'
class ClientAdmin(ImportExportModelAdmin):
    resource_class = ClientResource
    list_display = ('last_name', 'first_name', 'intake_date', 'last_updated', 'hearing_loss', 'audiologist', 'client_grantors', 'cost_share', 'cost_share_approval')
    list_display_links = ('last_name', 'first_name',)
    list_filter = ('provider', 'audiologist', 'grantors', 'family_size', 'hearing_loss',
                   DeceasedFilter, CostShareApprovedFilter, UpdateMeetingFilter, 'update_meeting',
                   ProviderAuthReqFilter, ProviderAuthRecvFilter,
                   AudiologistReferredFilter, AudiologistApptFilter, AudiologistInvoicedFilter,
                   'equipment_requested', 'adaptive_equipment', 'hearing_aid_assistance',
                   'last_updated',
                   'quota_client', 'deliverable', 'non_kcsm',
                   'intake_staff', 'data_entry_staff')
    ordering = ('-intake_date',)
    date_hierarchy = 'intake_date'
    search_fields = [f.name for f in Client._meta.local_fields if isinstance(f, (CharField, TextField))]
    formfield_overrides = {
        models.TextField: {
            'widget': standard_textarea,
        },
    }
    inlines = (ClientIncomeInlineAdmin, MeetingLogInlineAdmin)
    readonly_fields = ('id', 'last_updated')
    fieldsets = (
        (None, {
            'fields': (
                'id', 'napis_id',
            )
        }),
        ('Personal Info', {
            'fields': (
                'first_name', 'last_name', 'gender', 'date_of_birth', 'date_of_death',
                'is_veteran', 'lives_alone', 'spouse', 'family_size',
            )
        }),
        ('Contact', {
            'fields': (
                'address', 'city', 'county', 'state', 'zip_code', 'deliverable',
                'email', 'phone',
                'emergency_contact',
                'emergency_phone',
            )
        }),
        ('Notes', {
            'fields': (
                'notes',
            )
        }),
        ('Demographics', {
            'fields': (
                'race', 'is_hispanic',
                'multiracial', 'multiracial_white', 'multiracial_black', 'multiracial_asian', 'multiracial_amind',
            )
        }),
        ('Assistance', {
            'fields': (
                'hearing_loss', 'aids_requested_left', 'aids_requested_right', 'equipment_requested',
                'hearing_assistance', 'adaptive_equipment', 'hearing_aid_assistance',
                'equipment_borrowed',
            )
        }),
        ('Additional Forms', {
            'fields': (
                'proof_of_age', 'signed_client_intake', 'signed_disclosure_authorization',
                'signed_confidentiality_policy', 'signed_gross_annual_income',
                'signed_client_responsibility_fees'
            )
        }),
        ('DHHS', {
            'fields': (
                'intake_date', 'intake_staff', 'data_entry_staff', 'last_updated', 'referrer',
                'update_meeting',
                'cost_share_approval', 'cost_share',
                'quota_client', 'non_kcsm', 'grantors',
                'provider', 'audient_id', 'provider_auth_requested', 'provider_auth_received',
            )
        }),
        ('Audiologist', {
            'fields': (
                'audiologist', 'audiologist_referral_date', 'audiologist_appointment_date', 
                'audiologist_invoiced_date', 'audiologist_invoiced_amount',
            )
        }),
    )
class MeetingLogAdmin(ImportExportModelAdmin):
    resource_class = MeetingLogResource
    list_display = ('client', 'contact_date', 'consultation_time', 'paperwork_time', 'units', 'results', 'user')
    list_display_links = ('contact_date',)
    list_filter = ('client', 'contact_date', 'user')
    ordering = ('-contact_date',)
    date_hierarchy = 'contact_date'
    formfield_overrides = {
        models.TextField: {
            'widget': standard_textarea,
        },
    }
    def units(self, obj):
        return (obj.consultation_time + obj.paperwork_time) / 60
class ProviderAdmin(ImportExportModelAdmin):
    ordering = ('name',)
    resource_class = ProviderResource
    formfield_overrides = {
        models.TextField: {
            'widget': standard_textarea,
        },
    }
class GrantorAdmin(ImportExportModelAdmin):
    ordering = ('name',)
    resource_class = GrantorResource
    formfield_overrides = {
        models.TextField: {
            'widget': standard_textarea,
        },
    }
admin.site.disable_action('delete_selected')
admin.site.site_header = 'Deaf & Hard of Hearing Services - ADAPT'
admin.site.site_title = 'ADAPT'
admin.site.site_url = None
admin.site.index_title = ''
admin.site.register(Audiologist, AudiologistAdmin)
admin.site.register(Client, ClientAdmin)
admin.site.register(Provider, ProviderAdmin)
admin.site.register(Grantor, GrantorAdmin)
admin.site.register(MeetingLog, MeetingLogAdmin)
admin.site.register(Settings, SingletonModelAdmin)
	"""
Tests for both experiment.py and experiment_set.py
"""
import pytest
from snovault import TYPES
# from snovault.storage import UUID
from uuid import uuid4
from ..types.experiment import ExperimentHiC
pytestmark = [pytest.mark.setone, pytest.mark.working]
@pytest.fixture
def custom_experiment_set_data(lab, award):
    return {
        'lab': lab['@id'],
        'award': award['@id'],
        'description': 'test experiment set',
        'experimentset_type': 'custom',
        'status': 'in review by lab'
    }
@pytest.fixture
def custom_experiment_set(testapp, custom_experiment_set_data):
    return testapp.post_json('/experiment_set', custom_experiment_set_data).json['@graph'][0]
@pytest.fixture
def replicate_experiment_set_data(lab, award):
    return {
        'lab': lab['@id'],
        'award': award['@id'],
        'description': 'test replicate set',
        'experimentset_type': 'replicate',
        'status': 'in review by lab'
    }
@pytest.fixture
def replicate_experiment_set(testapp, replicate_experiment_set_data):
    return testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0]
@pytest.fixture
def sop_map_data(protocol, lab, award):
    return {
        "sop_name": "in situ Hi-C SOP map",
        "sop_version": 1,
        'lab': lab['@id'],
        'award': award['@id'],
        "associated_item_type": "ExperimentHiC",
        "id_values": ["in situ Hi-C"],
        "notes": "This is just a dummy insert not linked to true SOP protocol",
        "description": "Fields with specified defaults in the SOP for in situ Hi-C experiments as per ??",
        "sop_protocol": protocol['@id'],
        "fields_with_default": [
            {"field_name": "digestion_enzyme", "field_value": "MboI"},
        ]
    }
@pytest.fixture
def sop_map_data_2(lab, award):
    return {
        "sop_name": "Second in situ hic map",
        "sop_version": 2,
        'lab': lab['@id'],
        'award': award['@id'],
        "associated_item_type": "ExperimentHiC",
        "id_values": ["in situ Hi-C"],
        "notes": "This is a dummy second version of map",
        "description": "Second",
    }
def test_experiment_update_experiment_relation(testapp, base_experiment, experiment):
    relation = [{'relationship_type': 'controlled by',
                 'experiment': experiment['@id']}]
    res = testapp.patch_json(base_experiment['@id'], {'experiment_relation': relation})
    assert res.json['@graph'][0]['experiment_relation'] == relation
    # patching an experiment should also update the related experiment
    exp_res = testapp.get(experiment['@id'])
    exp_res_id = exp_res.json['experiment_relation'][0]['experiment']['@id']
    assert exp_res_id == base_experiment['@id']
def test_experiment_update_hic_sop_mapping_added_on_submit(testapp, experiment_data, sop_map_data):
    res_sop = testapp.post_json('/sop_map', sop_map_data, status=201)
    res_exp = testapp.post_json('/experiment_hi_c', experiment_data)
    assert 'sop_mapping' in res_exp.json['@graph'][0]
    assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "Yes"
    assert res_exp.json['@graph'][0]['sop_mapping']['sop_map'] == res_sop.json['@graph'][0]['@id']
def test_experiment_update_hic_sop_mapping_has_map_is_no(testapp, experiment_data, exp_types):
    experiment_data['experiment_type'] = exp_types['dnase']['@id']
    res_exp = testapp.post_json('/experiment_hi_c', experiment_data)
    assert 'sop_mapping' in res_exp.json['@graph'][0]
    assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "No"
def test_experiment_update_hic_sop_mapping_has_sop2no_when_only_sopmap_deleted(
        testapp, experiment_data, sop_map_data):
    sop_map_data['status'] = 'deleted'
    testapp.post_json('/sop_map', sop_map_data, status=201)
    res_exp = testapp.post_json('/experiment_hi_c', experiment_data)
    assert 'sop_mapping' in res_exp.json['@graph'][0]
    assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "No"
def test_experiment_update_hic_sop_mapping_to_v2_when_2_versions(
        testapp, experiment_data, sop_map_data, sop_map_data_2):
    testapp.post_json('/sop_map', sop_map_data, status=201)
    res2chk = testapp.post_json('/sop_map', sop_map_data_2, status=201)
    res_exp = testapp.post_json('/experiment_hi_c', experiment_data)
    assert 'sop_mapping' in res_exp.json['@graph'][0]
    assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "Yes"
    assert res_exp.json['@graph'][0]['sop_mapping']['sop_map'] == res2chk.json['@graph'][0]['@id']
def test_experiment_update_hic_sop_mapping_to_v1_when_v2_deleted(
        testapp, experiment_data, sop_map_data, sop_map_data_2):
    res2chk = testapp.post_json('/sop_map', sop_map_data, status=201)
    sop_map_data_2['status'] = 'deleted'
    testapp.post_json('/sop_map', sop_map_data_2, status=201)
    res_exp = testapp.post_json('/experiment_hi_c', experiment_data)
    assert 'sop_mapping' in res_exp.json['@graph'][0]
    assert res_exp.json['@graph'][0]['sop_mapping']['has_sop'] == "Yes"
    assert res_exp.json['@graph'][0]['sop_mapping']['sop_map'] == res2chk.json['@graph'][0]['@id']
def test_experiment_update_hic_sop_map_not_added_when_already_present(testapp, experiment_data):
    experiment_data['sop_mapping'] = {}
    experiment_data['sop_mapping']['has_sop'] = 'No'
    res = testapp.post_json('/experiment_hi_c', experiment_data)
    assert 'sop_mapping' in res.json['@graph'][0]
    assert res.json['@graph'][0]['sop_mapping']['has_sop'] == "No"
    assert 'sop_map' not in res.json['@graph'][0]['sop_mapping']
def test_calculated_experiment_summary(testapp, experiment, mboI):
    summary = 'in situ Hi-C on GM12878 with MboI'
    res = testapp.patch_json(experiment['@id'], {'digestion_enzyme': mboI['@id']}, status=200)
    assert res.json['@graph'][0]['experiment_summary'] == summary
    assert summary in res.json['@graph'][0]['display_title']
def test_experiment_summary_repliseq(repliseq_4):
    assert repliseq_4.get('experiment_summary') == '2-stage Repli-seq on GM12878 S-phase early'
# test for experiment_set_replicate _update function
def test_experiment_set_replicate_update_adds_experiments_in_set(testapp, experiment, replicate_experiment_set):
    assert not replicate_experiment_set['experiments_in_set']
    res = testapp.patch_json(
        replicate_experiment_set['@id'],
        {'replicate_exps':
            [{'replicate_exp': experiment['@id'], 'bio_rep_no': 1, 'tec_rep_no': 1}]},
        status=200)
    assert experiment['@id'] in res.json['@graph'][0]['experiments_in_set']
# test for default_embedding practice with embedded list
# this test should change should any of the reference embeds below be altered
def test_experiment_set_default_embedded_list(registry, exp_types):
    exp_data = {
        'experiment_type': exp_types['microc']['uuid'],
        'status': 'in review by lab'
    }
    # create experimentHiC obj; _update (and by extension, add_default_embeds)
    # are called automatically
    test_exp = ExperimentHiC.create(registry, None, exp_data)
    # call reify embedded property (defined in snovault/resources.py)
    embedded = test_exp.embedded
    embedded_list = test_exp.embedded_list
    type_info_embedded = registry[TYPES]['experiment_hi_c'].embedded_list
    assert type_info_embedded == embedded_list
    if 'produced_in_pub.*' in embedded_list:
        assert 'produced_in_pub.*' in embedded
        assert 'produced_in_pub.award.@id' in embedded
        assert 'produced_in_pub.award.@type' in embedded
        assert 'produced_in_pub.award.principals_allowed.*' in embedded
        assert 'produced_in_pub.award.display_title' in embedded
        assert 'produced_in_pub.award.uuid' in embedded
    assert 'experiment_sets.accession' in embedded_list
    assert 'experiment_sets.@id' in embedded
    assert 'experiment_sets.@type' in embedded
    assert 'experiment_sets.principals_allowed.*' in embedded
    assert 'experiment_sets.display_title' in embedded
    assert 'experiment_sets.uuid' in embedded
# tests for the experiment_sets calculated properties
def test_calculated_experiment_sets_for_custom_experiment_set(testapp, experiment, custom_experiment_set):
    assert len(experiment['experiment_sets']) == 0
    res = testapp.patch_json(custom_experiment_set['@id'], {'experiments_in_set': [experiment['@id']]}, status=200)
    expt_res = testapp.get(experiment['@id'])
    assert custom_experiment_set['uuid'] == expt_res.json['experiment_sets'][0]['uuid']
def test_calculated_experiment_sets_for_replicate_experiment_set(testapp, experiment, replicate_experiment_set):
    assert len(experiment['experiment_sets']) == 0
    res = testapp.patch_json(
        replicate_experiment_set['@id'],
        {'replicate_exps':
            [{'replicate_exp': experiment['@id'], 'bio_rep_no': 1, 'tec_rep_no': 1}]},
        status=200)
    expt_res = testapp.get(experiment['@id'])
    assert replicate_experiment_set['uuid'] == expt_res.json['experiment_sets'][0]['uuid']
@pytest.fixture
def pub1_data(lab, award):
    # encode paper published 2012-09-06
    return {
        'award': award['@id'],
        'lab': lab['@id'],
        'ID': "PMID:22955616"
    }
@pytest.fixture
def pub2_data(lab, award):
    # Sanborn et al paper published 2015-11-24
    return {
        'award': award['@id'],
        'lab': lab['@id'],
        'ID': "PMID:26499245"
    }
def test_calculated_produced_in_pub_for_rep_experiment_set(testapp, replicate_experiment_set, pub1_data):
    # post single rep_exp_set to single pub
    pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    expsetres = testapp.get(replicate_experiment_set['@id'])
    assert 'produced_in_pub' in expsetres
    assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in expsetres.json['produced_in_pub'].values()
def test_calculated_produced_in_pub_for_cust_experiment_set(testapp, custom_experiment_set, pub1_data):
    # post single cust_exp_set to single pub
    pub1_data['exp_sets_prod_in_pub'] = [custom_experiment_set['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    expsetres = testapp.get(custom_experiment_set['@id'])
    assert 'produced_in_pub' in expsetres
    assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in expsetres.json['produced_in_pub'].values()
def test_calculated_produced_in_pub_for_two_experiment_set_to_one_pub(
        testapp, replicate_experiment_set, custom_experiment_set, pub1_data):
    # post two exp_set to single pub
    pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id'], custom_experiment_set['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    responses = [testapp.get(replicate_experiment_set['@id']),
                 testapp.get(custom_experiment_set['@id'])]
    for response in responses:
        assert 'produced_in_pub' in response
        assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id']
def test_calculated_produced_in_pub_for_two_experiment_set_two_pubs(
        testapp, replicate_experiment_set, custom_experiment_set, pub1_data, pub2_data):
    # post different exp_set to each pub
    pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']]
    pub2_data['exp_sets_prod_in_pub'] = [custom_experiment_set['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    pub2res = testapp.post_json('/publication', pub2_data, status=201)
    responses = [testapp.get(replicate_experiment_set['@id']),
                 testapp.get(custom_experiment_set['@id'])]
    for response in responses:
        assert 'produced_in_pub' in response
    assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == responses[0].json['produced_in_pub']['@id']
    assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' == responses[1].json['produced_in_pub']['@id']
def test_calculated_produced_in_pub_for_one_experiment_set_two_pubs(
        testapp, replicate_experiment_set, pub1_data, pub2_data):
    # post one exp_set to two pubs - this one should pick up only the most recent pub
    pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']]
    pub2_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    pub2res = testapp.post_json('/publication', pub2_data, status=201)
    response = testapp.get(replicate_experiment_set['@id'])
    assert 'produced_in_pub' in response
    assert not '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id']
    assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id']
def test_calculated_publications_in_experiment_set_no_data(
        testapp, replicate_experiment_set, custom_experiment_set, pub1_data):
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    print(replicate_experiment_set)
    print(custom_experiment_set)
    assert not replicate_experiment_set['publications_of_set']
    assert not custom_experiment_set['publications_of_set']
def test_calculated_publications_in_rep_experiment_set_2_fields(
        testapp, replicate_experiment_set, pub1_data):
    # post single rep_exp_set to single pub both fields
    pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']]
    pub1_data['exp_sets_used_in_pub'] = [replicate_experiment_set['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    response = testapp.get(replicate_experiment_set['@id'])
    print(response)
    print('JSON:', response.json)
    assert 'publications_of_set' in response
    assert len(response.json['publications_of_set']) == 1
    assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in response.json['publications_of_set'][0].values()
def test_calculated_publications_in_cust_experiment_set_used_in_field(
        testapp, custom_experiment_set, pub1_data):
    # post only used in publication one pub one exp set
    pub1_data['exp_sets_used_in_pub'] = [custom_experiment_set['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    response = testapp.get(custom_experiment_set['@id'])
    assert 'publications_of_set' in response
    assert len(response.json['publications_of_set']) == 1
    assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in response.json['publications_of_set'][0].values()
def test_calculated_publications_in_rep_experiment_set_two_pubs_both_fields(
        testapp, replicate_experiment_set, pub1_data, pub2_data):
    # post same experiment set to two pubs in either field
    pub1_data['exp_sets_prod_in_pub'] = [replicate_experiment_set['@id']]
    pub2_data['exp_sets_used_in_pub'] = [replicate_experiment_set['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    pub2res = testapp.post_json('/publication', pub2_data, status=201)
    response = testapp.get(replicate_experiment_set['@id'])
    assert 'publications_of_set' in response
    assert len(response.json['publications_of_set']) == 2
    publications = response.json['publications_of_set']
    combined_pub_vals = [p['@id'] for p in publications]
    assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in combined_pub_vals
    assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' in combined_pub_vals
def test_calculated_publications_in_rep_experiment_set_two_pubs_in_used(
        testapp, replicate_experiment_set, pub1_data, pub2_data):
    # post same experiment set to two pubs in used in pub field
    pub1_data['exp_sets_used_in_pub'] = [replicate_experiment_set['@id']]
    pub2_data['exp_sets_used_in_pub'] = [replicate_experiment_set['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    pub2res = testapp.post_json('/publication', pub2_data, status=201)
    response = testapp.get(replicate_experiment_set['@id'])
    assert 'publications_of_set' in response
    assert len(response.json['publications_of_set']) == 2
    publications = response.json['publications_of_set']
    combined_pub_vals = list(publications[0].values()) + list(publications[1].values())
    assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' in combined_pub_vals
    assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' in combined_pub_vals
# experiment pub calculated properties tests
@pytest.fixture
def repset_w_exp1(testapp, replicate_experiment_set_data, experiment):
    repset = replicate_experiment_set_data
    repset['replicate_exps'] = [{'replicate_exp': experiment['@id'], 'bio_rep_no': 1, 'tec_rep_no': 1}]
    return testapp.post_json('/experiment_set_replicate', repset).json['@graph'][0]
@pytest.fixture
def experiment2(testapp, experiment_data, exp_types):
    experiment_data['experiment_type'] = exp_types['capc']['@id']
    return testapp.post_json('/experiment_capture_c', experiment_data).json['@graph'][0]
@pytest.fixture
def custset_w_exp1(testapp, custom_experiment_set_data, experiment):
    custset = custom_experiment_set_data
    custset['experiments_in_set'] = [experiment['@id']]
    return testapp.post_json('/experiment_set', custset).json['@graph'][0]
@pytest.fixture
def custset_w_exp2(testapp, custom_experiment_set_data, experiment2):
    custset = custom_experiment_set_data
    custset['experiments_in_set'] = [experiment2['@id']]
    return testapp.post_json('/experiment_set', custset).json['@graph'][0]
def test_calculated_expt_produced_in_pub_for_rep_experiment_set(
        testapp, repset_w_exp1, pub1_data):
    # post single rep_exp_set to single pub
    pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    expres = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp'])
    # import pdb; pdb.set_trace()
    assert 'produced_in_pub' in expres
    assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == expres.json['produced_in_pub']['@id']
def test_calculated_expt_produced_in_pub_for_expt_w_ref(
        testapp, experiment_data, replicate_experiment_set_data, pub2_data, publication):
    experiment_data['references'] = [publication['@id']]
    # just check experiment by itself first
    expt = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0]
    assert 'produced_in_pub' in expt
    assert publication['@id'] == expt['produced_in_pub']
    # post repset with this experiment
    replicate_experiment_set_data['replicate_exps'] = [{'bio_rep_no': 1, 'tec_rep_no': 1, 'replicate_exp': expt['@id']}]
    repset = testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data, status=201).json['@graph'][0]
    # post single rep_exp_set to single pub
    pub2_data['exp_sets_prod_in_pub'] = [repset['@id']]
    testapp.post_json('/publication', pub2_data, status=201)
    expinset = testapp.get(repset['replicate_exps'][0]['replicate_exp']).json
    assert 'produced_in_pub' in expinset
    assert publication['@id'] == expinset['produced_in_pub']['@id']
def test_calculated_expt_produced_in_pub_for_cust_experiment_set(
        testapp, custset_w_exp1, pub1_data):
    # post single cust_exp_set to single pub
    pub1_data['exp_sets_prod_in_pub'] = [custset_w_exp1['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    expres = testapp.get(custset_w_exp1['experiments_in_set'][0])
    assert 'produced_in_pub' not in expres.json.keys()
def test_calculated_expt_produced_in_pub_for_one_expt_in_two_expset_one_pub(
        testapp, repset_w_exp1, custset_w_exp1, pub1_data):
    # post two exp_set with same experiment (repset and custset) to single pub
    pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id'], custset_w_exp1['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    # both responses will get the same experiment
    responses = [testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']),
                 testapp.get(custset_w_exp1['experiments_in_set'][0])]
    for response in responses:
        assert 'produced_in_pub' in response
        assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id']
def test_calculated_expt_produced_in_pub_for_two_exp_two_expset_two_pubs(
        testapp, repset_w_exp1, custset_w_exp2, pub1_data, pub2_data):
    # post 2 exp_set (one repset, one custom) each with diff expt to each pub
    # only expt in repset should get the pub of repset
    pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']]
    pub2_data['exp_sets_prod_in_pub'] = [custset_w_exp2['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    testapp.post_json('/publication', pub2_data, status=201)
    responses = [testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']),
                 testapp.get(custset_w_exp2['experiments_in_set'][0])]
    assert '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == responses[0].json['produced_in_pub']['@id']
    assert 'produced_in_pub' not in responses[1].json
def test_calculated_expt_produced_in_pub_for_one_expt_one_expset_two_pubs(
        testapp, repset_w_exp1, pub1_data, pub2_data):
    # post one exp_set to two pubs - this one should pick up only the most recent pub
    pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']]
    pub2_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    pub2res = testapp.post_json('/publication', pub2_data, status=201)
    response = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp'])
    assert 'produced_in_pub' in response
    assert not '/publications/' + pub1res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id']
    assert '/publications/' + pub2res.json['@graph'][0]['uuid'] + '/' == response.json['produced_in_pub']['@id']
def test_calculated_publications_in_experiment_no_data(
        testapp, repset_w_exp1, custset_w_exp2, pub1_data):
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    responses = [testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp']),
                 testapp.get(custset_w_exp2['experiments_in_set'][0])]
    for response in responses:
        assert response.json['publications_of_exp'] == []
def test_calculated_publications_in_expt_w_repset_in_both_fields(
        testapp, repset_w_exp1, pub1_data):
    # post single rep_exp_set to single pub both fields
    pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']]
    pub1_data['exp_sets_used_in_pub'] = [repset_w_exp1['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    response = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp'])
    assert 'publications_of_exp' in response
    assert len(response.json['publications_of_exp']) == 1
    assert pub1res.json['@graph'][0]['uuid'] == response.json['publications_of_exp'][0]['uuid']
def test_calculated_publications_in_expt_w_custset_used_in_field(
        testapp, custset_w_exp2, pub1_data):
    # post only used in publication one pub one exp set
    pub1_data['exp_sets_used_in_pub'] = [custset_w_exp2['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    response = testapp.get(custset_w_exp2['experiments_in_set'][0])
    assert 'publications_of_exp' in response
    assert len(response.json['publications_of_exp']) == 1
    assert pub1res.json['@graph'][0]['uuid'] == response.json['publications_of_exp'][0]['uuid']
def test_calculated_publications_in_expt_w_repset_two_pubs_both_fields(
        testapp, repset_w_exp1, pub1_data, pub2_data):
    # post same experiment set to two pubs in either field
    pub1_data['exp_sets_prod_in_pub'] = [repset_w_exp1['@id']]
    pub2_data['exp_sets_used_in_pub'] = [repset_w_exp1['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    pub2res = testapp.post_json('/publication', pub2_data, status=201)
    pubuuids = [pub1res.json['@graph'][0]['uuid']]
    pubuuids.append(pub2res.json['@graph'][0]['uuid'])
    response = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp'])
    assert 'publications_of_exp' in response
    assert len(response.json['publications_of_exp']) == 2
    publications = response.json['publications_of_exp']
    for pub in publications:
        assert pub['uuid'] in pubuuids
def test_calculated_publications_in_expt_w_repset_two_pubs_in_used(
        testapp, repset_w_exp1, pub1_data, pub2_data):
    # post same experiment set to two pubs in used in pub field
    pub1_data['exp_sets_used_in_pub'] = [repset_w_exp1['@id']]
    pub2_data['exp_sets_used_in_pub'] = [repset_w_exp1['@id']]
    pub1res = testapp.post_json('/publication', pub1_data, status=201)
    pub2res = testapp.post_json('/publication', pub2_data, status=201)
    pubuuids = [pub1res.json['@graph'][0]['uuid']]
    pubuuids.append(pub2res.json['@graph'][0]['uuid'])
    response = testapp.get(repset_w_exp1['replicate_exps'][0]['replicate_exp'])
    assert 'publications_of_exp' in response
    assert len(response.json['publications_of_exp']) == 2
    publications = response.json['publications_of_exp']
    for pub in publications:
        assert pub['uuid'] in pubuuids
def test_calculated_no_of_expts_in_set_w_no_exps(empty_replicate_set):
    assert 'number_of_experiments' not in empty_replicate_set
def test_calculated_no_of_expts_in_set_w_2_exps(two_experiment_replicate_set):
    assert two_experiment_replicate_set['number_of_experiments'] == 2
# tests for category calculated_property
@pytest.fixture
def target_w_prot(testapp, lab, award):
    item = {
        'description': "Protein target",
        'targeted_proteins': ['CTCF (ABCD)'],
        'award': award['@id'],
        'lab': lab['@id'],
    }
    return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def exp_w_target_info(lab, award, human_biosample, exp_types,
                      mboI, genomic_region_bio_feature):
    return {
        'lab': lab['@id'],
        'award': award['@id'],
        'biosample': human_biosample['@id'],
        'experiment_type': exp_types['capc']['@id'],
        'targeted_regions': [{'target': [genomic_region_bio_feature['@id']]}]
    }
@pytest.fixture
def expt_w_targ_region(testapp, exp_w_target_info):
    return testapp.post_json('/experiment_capture_c', exp_w_target_info).json['@graph'][0]
@pytest.fixture
def expt_w_2_targ_regions(testapp, exp_w_target_info, gene_bio_feature):
    region = {'target': [gene_bio_feature['@id']]}
    exp_w_target_info['targeted_regions'].append(region)
    return testapp.post_json('/experiment_capture_c', exp_w_target_info).json['@graph'][0]
@pytest.fixture
def expt_w_target_data(lab, award, human_biosample,
                       prot_bio_feature, exp_types):
    return {
        'lab': lab['@id'],
        'award': award['@id'],
        'biosample': human_biosample['@id'],
        'experiment_type': exp_types['chia']['@id'],
        'targeted_factor': [prot_bio_feature['@id']]
    }
@pytest.fixture
def expt_w_target(testapp, expt_w_target_data):
    return testapp.post_json('/experiment_chiapet', expt_w_target_data).json['@graph'][0]
@pytest.fixture
def chipseq_expt(testapp, expt_w_target_data, exp_types):
    expt_w_target_data['experiment_type'] = exp_types['chipseq']['@id']
    return testapp.post_json('/experiment_seq', expt_w_target_data).json['@graph'][0]
@pytest.fixture
def tsaseq_expt(testapp, expt_w_target_data, exp_types):
    expt_w_target_data['experiment_type'] = exp_types['tsaseq']['@id']
    return testapp.post_json('/experiment_tsaseq', expt_w_target_data).json['@graph'][0]
@pytest.fixture
def repliseq_info(lab, award, human_biosample, exp_types):
    return {
        'lab': lab['@id'],
        'award': award['@id'],
        'biosample': human_biosample['@id'],
        'experiment_type': exp_types['repliseq']['@id'],
    }
@pytest.fixture
def repliseq_1(testapp, repliseq_info):
    return testapp.post_json('/experiment_repliseq', repliseq_info).json['@graph'][0]
@pytest.fixture
def repliseq_2(testapp, repliseq_info):
    repliseq_info['stage_fraction'] = 'early'
    return testapp.post_json('/experiment_repliseq', repliseq_info).json['@graph'][0]
@pytest.fixture
def repliseq_3(testapp, repliseq_info):
    repliseq_info['stage_fraction'] = 'early'
    repliseq_info['total_fractions_in_exp'] = 16
    return testapp.post_json('/experiment_repliseq', repliseq_info).json['@graph'][0]
@pytest.fixture
def repliseq_4(testapp, repliseq_info):
    repliseq_info['stage_fraction'] = 'early'
    repliseq_info['total_fractions_in_exp'] = 2
    repliseq_info['cell_cycle_phase'] = 'S'
    return testapp.post_json('/experiment_repliseq', repliseq_info).json['@graph'][0]
@pytest.fixture
def experiment_atacseq(testapp, repliseq_info, exp_types):
    repliseq_info['experiment_type'] = exp_types['atacseq']['@id']
    return testapp.post_json('/experiment_atacseq', repliseq_info).json['@graph'][0]
@pytest.fixture
def damid_no_fusion(testapp, repliseq_info, exp_types):
    repliseq_info['experiment_type'] = exp_types['dam']['@id']
    return testapp.post_json('/experiment_damid', repliseq_info).json['@graph'][0]
@pytest.fixture
def damid_w_fusion(testapp, repliseq_info, prot_bio_feature, exp_types):
    repliseq_info['experiment_type'] = exp_types['dam']['@id']
    repliseq_info['targeted_factor'] = [prot_bio_feature['@id']]
    return testapp.post_json('/experiment_damid', repliseq_info).json['@graph'][0]
@pytest.fixture
def damid_w_multifusion(testapp, repliseq_info, prot_bio_feature, gene_bio_feature, exp_types):
    repliseq_info['experiment_type'] = exp_types['dam']['@id']
    repliseq_info['targeted_factor'] = [prot_bio_feature['@id'], gene_bio_feature['@id']]
    return testapp.post_json('/experiment_damid', repliseq_info).json['@graph'][0]
@pytest.fixture
def basic_info(lab, award):
    return {
        'lab': lab['@id'],
        'award': award['@id'],
    }
@pytest.fixture
def imaging_path_1(testapp, basic_info, genomic_region_bio_feature):
    basic_info['target'] = [genomic_region_bio_feature['@id']]
    basic_info['labeled_probe'] = 'FITC goat anti rabbit'
    return testapp.post_json('/imaging_path', basic_info).json['@graph'][0]
@pytest.fixture
def imaging_path_2(testapp, basic_info, genomic_region_bio_feature):
    basic_info['target'] = [genomic_region_bio_feature['@id']]
    basic_info['labeled_probe'] = 'TRITC horse anti rabbit'
    return testapp.post_json('/imaging_path', basic_info).json['@graph'][0]
@pytest.fixture
def imaging_path_3(testapp, basic_info, basic_region_bio_feature):
    basic_info['target'] = [basic_region_bio_feature['@id']]
    basic_info['labeled_probe'] = 'DAPI'
    return testapp.post_json('/imaging_path', basic_info).json['@graph'][0]
@pytest.fixture
def microscopy_no_path(testapp, repliseq_info, exp_types):
    repliseq_info['experiment_type'] = exp_types['fish']['@id']
    return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0]
@pytest.fixture
def microscopy_w_path(testapp, repliseq_info, imaging_path_1, exp_types):
    repliseq_info['experiment_type'] = exp_types['fish']['@id']
    img_path = {'path': imaging_path_1['@id'], 'channel': 'ch01'}
    repliseq_info['imaging_paths'] = [img_path]
    return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0]
@pytest.fixture
def microscopy_w_multipath(testapp, repliseq_info, imaging_path_1, imaging_path_2,
                           imaging_path_3, exp_types):
    repliseq_info['experiment_type'] = exp_types['fish']['@id']
    img_path1 = {'path': imaging_path_1['@id'], 'channel': 'ch01'}
    img_path2 = {'path': imaging_path_2['@id'], 'channel': 'ch02'}
    img_path3 = {'path': imaging_path_3['@id'], 'channel': 'ch03'}
    repliseq_info['imaging_paths'] = [img_path1, img_path2, img_path3]
    return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0]
@pytest.fixture
def microscopy_w_splitpath(testapp, repliseq_info, exp_types,
                           imaging_path_1, imaging_path_3,
                           basic_region_bio_feature, genomic_region_bio_feature):
    '''Sometimes a group of targets is split across different imaging paths,
    e.g. due to multiplexing. If the preferred_label text is formatted as below
    ("<count> TADs on chr19"), the split group is detected and replaced with the
    summed count.'''
    repliseq_info['experiment_type'] = exp_types['fish']['@id']
    img_path1 = {'path': imaging_path_1['@id'], 'channel': 'ch01'}
    img_path3 = {'path': imaging_path_3['@id'], 'channel': 'ch03'}
    repliseq_info['imaging_paths'] = [img_path1, img_path3]
    testapp.patch_json(basic_region_bio_feature['@id'],
                       {'preferred_label': '15 TADs on chr19'}).json['@graph'][0]
    testapp.patch_json(genomic_region_bio_feature['@id'],
                       {'preferred_label': '22 TADs on chr19'}).json['@graph'][0]
    return testapp.post_json('/experiment_mic', repliseq_info).json['@graph'][0]
def test_experiment_atacseq_display_title(experiment_atacseq):
    assert experiment_atacseq.get('display_title') == 'ATAC-seq on GM12878 - ' + experiment_atacseq.get('accession')
def test_experiment_damid_w_multifusion_display_title(damid_w_multifusion):
    assert damid_w_multifusion.get('display_title') == 'DamID-seq with mulitiple DAM fusions on GM12878 - ' + damid_w_multifusion.get('accession')
def test_experiment_chiapet_w_target_display_title(expt_w_target):
    assert expt_w_target.get('display_title') == 'ChIA-PET against RAD21 protein on GM12878 - ' + expt_w_target.get('accession')
def test_experiment_chipseq_w_target_display_title(chipseq_expt):
    assert chipseq_expt.get('display_title') == 'ChIP-seq against RAD21 protein on GM12878 - ' + chipseq_expt.get('accession')
def test_experiment_tsaseq_display_title(tsaseq_expt):
    assert tsaseq_expt.get('display_title') == 'TSA-seq against RAD21 protein on GM12878 - ' + tsaseq_expt.get('accession')
def test_experiment_categorizer_4_mic_no_path(testapp, microscopy_no_path):
    assert microscopy_no_path['experiment_categorizer']['field'] == 'Default'
    assert microscopy_no_path['experiment_categorizer'].get('value') is None
def test_experiment_categorizer_4_mic_w_path(testapp, microscopy_w_path, genomic_region_bio_feature):
    assert microscopy_w_path['experiment_categorizer']['field'] == 'Target'
    assert microscopy_w_path['experiment_categorizer']['value'] == genomic_region_bio_feature['display_title']
def test_experiment_categorizer_4_mic_w_multi_path(testapp, microscopy_w_multipath, genomic_region_bio_feature, basic_region_bio_feature):
    vals2chk = [genomic_region_bio_feature['display_title'], basic_region_bio_feature['display_title']]
    len2chk = len(vals2chk[0]) + len(vals2chk[1]) + 2
    assert microscopy_w_multipath['experiment_categorizer']['field'] == 'Target'
    value = microscopy_w_multipath['experiment_categorizer']['value']
    assert len(value) == len2chk
    for v in vals2chk:
        assert v in value
def test_experiment_categorizer_4_mic_w_split_path(testapp, microscopy_w_splitpath):
    '''Sometimes a (group of) target(s) is split into different imaging paths,
    e.g. due to multiplexing. Sum the split targets and return only one string.'''
    assert microscopy_w_splitpath['experiment_categorizer']['value'] == '37 TADs on chr19'
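# Illustrative sketch only (the helper name `sum_split_labels` is hypothetical,
# not the portal's implementation): one way split targets could be summed, as
# exercised by the test above ('15 TADs on chr19' + '22 TADs on chr19' -> '37 TADs on chr19').
import re

def sum_split_labels(labels):
    """Sum the leading counts of labels like '15 TADs on chr19' when the
    remaining text is identical; otherwise join the labels unchanged."""
    parsed = [re.match(r'(\d+)\s+(.+)', lbl) for lbl in labels]
    if all(parsed) and len({m.group(2) for m in parsed}) == 1:
        total = sum(int(m.group(1)) for m in parsed)
        return '{} {}'.format(total, parsed[0].group(2))
    return ', '.join(sorted(labels))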
def test_experiment_categorizer_4_chiapet_no_fusion(testapp, repliseq_info, exp_types):
    repliseq_info['experiment_type'] = exp_types['chia']['@id']
    res = testapp.post_json('/experiment_chiapet', repliseq_info).json['@graph'][0]
    assert res['experiment_categorizer']['field'] == 'Default'
    assert res['experiment_categorizer']['value'] is None
def test_experiment_categorizer_4_damid_no_fusion(testapp, damid_no_fusion):
    assert damid_no_fusion['experiment_categorizer']['field'] == 'Target'
    assert damid_no_fusion['experiment_categorizer'].get('value') == 'None (Control)'
def test_experiment_categorizer_4_damid_w_fusion(testapp, damid_w_fusion, prot_bio_feature):
    assert damid_w_fusion['experiment_categorizer']['field'] == 'Target'
    assert damid_w_fusion['experiment_categorizer']['value'] == prot_bio_feature['display_title']
def test_experiment_categorizer_4_repliseq_no_fraction_info(testapp, repliseq_1):
    assert repliseq_1['experiment_categorizer']['field'] == 'Default'
    assert repliseq_1['experiment_categorizer'].get('value') is None
def test_experiment_categorizer_4_repliseq_only_fraction(testapp, repliseq_2):
    wanted = 'early of an unspecified number of fractions'
    assert repliseq_2['experiment_categorizer']['field'] == 'Fraction'
    assert repliseq_2['experiment_categorizer']['value'] == wanted
def test_experiment_categorizer_4_repliseq_fraction_and_total(testapp, repliseq_3):
    wanted = 'early of 16 fractions'
    assert repliseq_3['experiment_categorizer']['field'] == 'Fraction'
    assert repliseq_3['experiment_categorizer']['value'] == wanted
def test_experiment_categorizer_w_target(testapp, expt_w_target, prot_bio_feature):
    assert expt_w_target['experiment_categorizer']['field'] == 'Target'
    assert expt_w_target['experiment_categorizer']['value'] == prot_bio_feature['display_title']
def test_experiment_categorizer_w_enzyme(testapp, experiment, mboI):
    assert experiment['experiment_categorizer']['field'] == 'Enzyme'
    assert experiment['experiment_categorizer']['value'] == mboI['display_title']
def test_experiment_categorizer_w_target_and_enzyme(testapp, expt_w_target, prot_bio_feature, mboI):
    res = testapp.patch_json(expt_w_target['@id'], {'digestion_enzyme': mboI['@id']}).json['@graph'][0]
    assert res['digestion_enzyme'] == mboI['@id']
    assert res['experiment_categorizer']['field'] == 'Target'
    assert res['experiment_categorizer']['value'] == prot_bio_feature['display_title']
def test_experiment_categorizer_w_no_cat1(testapp, experiment_data, exp_types):
    del experiment_data['digestion_enzyme']
    experiment_data['experiment_type'] = exp_types['rnaseq']['@id']
    expt = testapp.post_json('/experiment_seq', experiment_data).json['@graph'][0]
    assert expt['experiment_categorizer']['field'] == 'Default'
    assert expt['experiment_categorizer'].get('value') is None
def test_experiment_categorizer_cap_c_no_regions(testapp, experiment_data, mboI, exp_types):
    experiment_data['experiment_type'] = exp_types['capc']['@id']
    expt = testapp.post_json('/experiment_capture_c', experiment_data).json['@graph'][0]
    assert expt['experiment_categorizer']['field'] == 'Enzyme'
    assert expt['experiment_categorizer']['value'] == mboI['display_title']
def test_experiment_categorizer_cap_c_w_region(expt_w_targ_region, genomic_region_bio_feature):
    assert expt_w_targ_region['experiment_categorizer']['field'] == 'Target'
    assert expt_w_targ_region['experiment_categorizer']['value'] == genomic_region_bio_feature['display_title']
def test_experiment_categorizer_cap_c_w_2regions(
        expt_w_2_targ_regions, genomic_region_bio_feature, gene_bio_feature):
    wanted = ', '.join(sorted([genomic_region_bio_feature['display_title'], gene_bio_feature['display_title']]))
    assert expt_w_2_targ_regions['experiment_categorizer']['field'] == 'Target'
    assert expt_w_2_targ_regions['experiment_categorizer']['value'] == wanted
@pytest.fixture
def new_exp_type(lab, award):
    data = {
        'uuid': str(uuid4()),
        'title': 'Title',
        'lab': lab['@id'],
        'award': award['@id'],
        'status': 'released',
        'valid_item_types': ['ExperimentSeq']
    }
    return data
def test_validate_exp_type_valid(testapp, experiment_data, new_exp_type):
    exp_type1 = testapp.post_json('/experiment_type', new_exp_type).json['@graph'][0]
    experiment_data['experiment_type'] = exp_type1['@id']
    expt = testapp.post_json('/experiment_hi_c', experiment_data, status=422)
    testapp.patch_json(exp_type1['@id'], {'valid_item_types': ['ExperimentSeq', 'ExperimentHiC']})
    expt = testapp.post_json('/experiment_hi_c', experiment_data, status=201).json['@graph'][0]
    assert expt['experiment_type'] == '/experiment-types/title/'
def test_validate_experiment_set_duplicate_replicate_experiments(testapp, rep_set_data, experiment):
    rep_set_data['replicate_exps'] = [{'bio_rep_no': 1, 'tec_rep_no': 1, 'replicate_exp': experiment['@id']},
                                      {'bio_rep_no': 1, 'tec_rep_no': 2, 'replicate_exp': experiment['@id']}]
    repset = testapp.post_json('/experiment_set_replicate', rep_set_data, status=422)
    assert repset.json['errors'][0]['name'] == 'ExperimentSet: non-unique exps'
    assert 'Duplicate experiment' in repset.json['errors'][0]['description']
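# Illustrative sketch (hypothetical helper, not the portal's validator): the
# duplicate check exercised above only needs to spot repeated experiment @ids
# among the replicate_exps entries.
def find_duplicate_replicate_exps(replicate_exps):
    seen, dupes = set(), []
    for rep in replicate_exps:
        exp = rep['replicate_exp']
        if exp in seen:
            dupes.append(exp)
        seen.add(exp)
    return dupes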
 | |
| 
	'''tzinfo timezone information for Australia/NSW.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class NSW(DstTzInfo):
    '''Australia/NSW timezone definition. See datetime.tzinfo for details'''
    zone = 'Australia/NSW'
    _utc_transition_times = [
d(1,1,1,0,0,0),
d(1916,12,31,14,1,0),
d(1917,3,24,15,0,0),
d(1941,12,31,16,0,0),
d(1942,3,28,15,0,0),
d(1942,9,26,16,0,0),
d(1943,3,27,15,0,0),
d(1943,10,2,16,0,0),
d(1944,3,25,15,0,0),
d(1971,10,30,16,0,0),
d(1972,2,26,16,0,0),
d(1972,10,28,16,0,0),
d(1973,3,3,16,0,0),
d(1973,10,27,16,0,0),
d(1974,3,2,16,0,0),
d(1974,10,26,16,0,0),
d(1975,3,1,16,0,0),
d(1975,10,25,16,0,0),
d(1976,3,6,16,0,0),
d(1976,10,30,16,0,0),
d(1977,3,5,16,0,0),
d(1977,10,29,16,0,0),
d(1978,3,4,16,0,0),
d(1978,10,28,16,0,0),
d(1979,3,3,16,0,0),
d(1979,10,27,16,0,0),
d(1980,3,1,16,0,0),
d(1980,10,25,16,0,0),
d(1981,2,28,16,0,0),
d(1981,10,24,16,0,0),
d(1982,4,3,16,0,0),
d(1982,10,30,16,0,0),
d(1983,3,5,16,0,0),
d(1983,10,29,16,0,0),
d(1984,3,3,16,0,0),
d(1984,10,27,16,0,0),
d(1985,3,2,16,0,0),
d(1985,10,26,16,0,0),
d(1986,3,15,16,0,0),
d(1986,10,18,16,0,0),
d(1987,3,14,16,0,0),
d(1987,10,24,16,0,0),
d(1988,3,19,16,0,0),
d(1988,10,29,16,0,0),
d(1989,3,18,16,0,0),
d(1989,10,28,16,0,0),
d(1990,3,3,16,0,0),
d(1990,10,27,16,0,0),
d(1991,3,2,16,0,0),
d(1991,10,26,16,0,0),
d(1992,2,29,16,0,0),
d(1992,10,24,16,0,0),
d(1993,3,6,16,0,0),
d(1993,10,30,16,0,0),
d(1994,3,5,16,0,0),
d(1994,10,29,16,0,0),
d(1995,3,4,16,0,0),
d(1995,10,28,16,0,0),
d(1996,3,30,16,0,0),
d(1996,10,26,16,0,0),
d(1997,3,29,16,0,0),
d(1997,10,25,16,0,0),
d(1998,3,28,16,0,0),
d(1998,10,24,16,0,0),
d(1999,3,27,16,0,0),
d(1999,10,30,16,0,0),
d(2000,3,25,16,0,0),
d(2000,8,26,16,0,0),
d(2001,3,24,16,0,0),
d(2001,10,27,16,0,0),
d(2002,3,30,16,0,0),
d(2002,10,26,16,0,0),
d(2003,3,29,16,0,0),
d(2003,10,25,16,0,0),
d(2004,3,27,16,0,0),
d(2004,10,30,16,0,0),
d(2005,3,26,16,0,0),
d(2005,10,29,16,0,0),
d(2006,4,1,16,0,0),
d(2006,10,28,16,0,0),
d(2007,3,24,16,0,0),
d(2007,10,27,16,0,0),
d(2008,3,29,16,0,0),
d(2008,10,25,16,0,0),
d(2009,3,28,16,0,0),
d(2009,10,24,16,0,0),
d(2010,3,27,16,0,0),
d(2010,10,30,16,0,0),
d(2011,3,26,16,0,0),
d(2011,10,29,16,0,0),
d(2012,3,24,16,0,0),
d(2012,10,27,16,0,0),
d(2013,3,30,16,0,0),
d(2013,10,26,16,0,0),
d(2014,3,29,16,0,0),
d(2014,10,25,16,0,0),
d(2015,3,28,16,0,0),
d(2015,10,24,16,0,0),
d(2016,3,26,16,0,0),
d(2016,10,29,16,0,0),
d(2017,3,25,16,0,0),
d(2017,10,28,16,0,0),
d(2018,3,24,16,0,0),
d(2018,10,27,16,0,0),
d(2019,3,30,16,0,0),
d(2019,10,26,16,0,0),
d(2020,3,28,16,0,0),
d(2020,10,24,16,0,0),
d(2021,3,27,16,0,0),
d(2021,10,30,16,0,0),
d(2022,3,26,16,0,0),
d(2022,10,29,16,0,0),
d(2023,3,25,16,0,0),
d(2023,10,28,16,0,0),
d(2024,3,30,16,0,0),
d(2024,10,26,16,0,0),
d(2025,3,29,16,0,0),
d(2025,10,25,16,0,0),
d(2026,3,28,16,0,0),
d(2026,10,24,16,0,0),
d(2027,3,27,16,0,0),
d(2027,10,30,16,0,0),
d(2028,3,25,16,0,0),
d(2028,10,28,16,0,0),
d(2029,3,24,16,0,0),
d(2029,10,27,16,0,0),
d(2030,3,30,16,0,0),
d(2030,10,26,16,0,0),
d(2031,3,29,16,0,0),
d(2031,10,25,16,0,0),
d(2032,3,27,16,0,0),
d(2032,10,30,16,0,0),
d(2033,3,26,16,0,0),
d(2033,10,29,16,0,0),
d(2034,3,25,16,0,0),
d(2034,10,28,16,0,0),
d(2035,3,24,16,0,0),
d(2035,10,27,16,0,0),
d(2036,3,29,16,0,0),
d(2036,10,25,16,0,0),
d(2037,3,28,16,0,0),
d(2037,10,24,16,0,0),
        ]
    _transition_info = [
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
i(36000,0,'EST'),
i(39600,3600,'EST'),
        ]
NSW = NSW()
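# Minimal usage sketch (assuming pytz is installed; 'Australia/NSW' follows
# Sydney time, UTC+10 standard with UTC+11 daylight saving):
#   from datetime import datetime
#   import pytz
#   tz = pytz.timezone('Australia/NSW')
#   tz.localize(datetime(2024, 1, 15, 12, 0)).utcoffset()  # 11:00:00 (DST)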
 | |
| 
	#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import unittest
import numpy as np
import paddle
# Supported types are referenced from `paddle.tensor.math`
# - Related paddle dtypes:
#   - int type: int64 (no test here: uint8, int8, int16, int32)
#   - float type: float32 (no test here: float64)
# - Python scalar dtypes:
#   - int (64-bit)
#   - float (64-bit)
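# For instance, the promotion behaviour asserted by the tests below is:
#   paddle.ones([2, 2, 2], dtype='int64') + 1    -> int64 tensor
#   paddle.ones([2, 2, 2], dtype='int64') + 1.5  -> float32 tensor
#   paddle.ones([2, 2, 2], dtype='float32') + 1  -> float32 tensor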
class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
    def check_operation(self, a, b, c, op):
        if op == '+':
            c_rlt = a + b
        elif op == '-':
            c_rlt = a - b
        elif op == '*':
            c_rlt = a * b
        elif op == '/':
            c_rlt = a / b
        elif op == '**':
            c_rlt = a**b
        elif op == '//':
            c_rlt = a // b
        elif op == '%':
            c_rlt = a % b
        else:
            raise ValueError("Unsupported operation.")
        self.assertEqual(c_rlt.dtype, c.dtype)
        self.assertTrue(np.array_equal(c_rlt.numpy(), c.numpy()))
    def test_tensor_add_scalar(self):
        # tensor(int64) + scalar(int)
        a = paddle.ones([2, 2, 2], dtype='int64')
        b = 1
        c = paddle.full([2, 2, 2], 2, dtype="int64")
        self.check_operation(a, b, c, '+')
        # tensor(float32) + scalar(int)
        a = paddle.ones([2, 2, 2], dtype='float32')
        b = 1
        c = paddle.full([2, 2, 2], 2, dtype="float32")
        self.check_operation(a, b, c, '+')
        # tensor(int64) + scalar(float, .0)
        a = paddle.ones([2, 2, 2], dtype='int64')
        b = 1.0
        c = paddle.full([2, 2, 2], 2, dtype="float32")
        self.check_operation(a, b, c, '+')
        # tensor(int64) + scalar(float, .5)
        a = paddle.ones([2, 2, 2], dtype='int64')
        b = 1.5
        c = paddle.full([2, 2, 2], 2.5, dtype="float32")
        self.check_operation(a, b, c, '+')
        # tensor(float32) + scalar(float)
        a = paddle.ones([2, 2, 2], dtype='float32')
        b = 1.5
        c = paddle.full([2, 2, 2], 2.5, dtype="float32")
        self.check_operation(a, b, c, '+')
    def test_tensor_sub_scalar(self):
        # tensor(int64) - scalar(int)
        a = paddle.ones([2, 2, 2], dtype='int64')
        b = 1
        c = paddle.zeros([2, 2, 2], dtype="int64")
        self.check_operation(a, b, c, '-')
        # tensor(float32) - scalar(int)
        a = paddle.ones([2, 2, 2], dtype='float32')
        b = 1
        c = paddle.zeros([2, 2, 2], dtype="float32")
        self.check_operation(a, b, c, '-')
        # tensor(int64) - scalar(float, .0)
        a = paddle.ones([2, 2, 2], dtype='int64')
        b = 1.0
        c = paddle.zeros([2, 2, 2], dtype="float32")
        self.check_operation(a, b, c, '-')
        # tensor(int64) - scalar(float, .5)
        a = paddle.full([2, 2, 2], 2, dtype='int64')
        b = 1.5
        c = paddle.full([2, 2, 2], 0.5, dtype="float32")
        self.check_operation(a, b, c, '-')
        # tensor(float32) - scalar(float)
        a = paddle.full([2, 2, 2], 2, dtype='float32')
        b = 1.5
        c = paddle.full([2, 2, 2], 0.5, dtype="float32")
        self.check_operation(a, b, c, '-')
    def test_scalar_sub_tensor(self):
        # scalar(int) - tensor(int64)
        a = 1
        b = paddle.ones([2, 2, 2], dtype='int64')
        c = paddle.zeros([2, 2, 2], dtype="int64")
        self.check_operation(a, b, c, '-')
        # scalar(int) - tensor(float32)
        a = 1
        b = paddle.ones([2, 2, 2], dtype='float32')
        c = paddle.zeros([2, 2, 2], dtype="float32")
        self.check_operation(a, b, c, '-')
        # scalar(float, .0) - tensor(int64)
        a = 1.0
        b = paddle.ones([2, 2, 2], dtype='int64')
        c = paddle.zeros([2, 2, 2], dtype="float32")
        self.check_operation(a, b, c, '-')
        # scalar(float, .5) - tensor(int64)
        a = 1.5
        b = paddle.full([2, 2, 2], 2, dtype='int64')
        c = paddle.full([2, 2, 2], -0.5, dtype="float32")
        self.check_operation(a, b, c, '-')
        # scalar(float) - tensor(float32)
        a = 1.5
        b = paddle.full([2, 2, 2], 2, dtype='float32')
        c = paddle.full([2, 2, 2], -0.5, dtype="float32")
        self.check_operation(a, b, c, '-')
    def test_tensor_mul_scalar(self):
        # tensor(int64) * scalar(int)
        a = paddle.ones([2, 2, 2], dtype='int64')
        b = 1
        c = paddle.ones([2, 2, 2], dtype="int64")
        self.check_operation(a, b, c, '*')
        # tensor(float32) * scalar(int)
        a = paddle.ones([2, 2, 2], dtype='float32')
        b = 1
        c = paddle.ones([2, 2, 2], dtype="float32")
        self.check_operation(a, b, c, '*')
        # tensor(int64) * scalar(float, .0)
        a = paddle.ones([2, 2, 2], dtype='int64')
        b = 1.0
        c = paddle.ones([2, 2, 2], dtype="float32")
        self.check_operation(a, b, c, '*')
        # tensor(int64) * scalar(float, .5)
        a = paddle.ones([2, 2, 2], dtype='int64')
        b = 1.5
        c = paddle.full([2, 2, 2], 1.5, dtype="float32")
        self.check_operation(a, b, c, '*')
        # tensor(float32) * scalar(float)
        a = paddle.ones([2, 2, 2], dtype='float32')
        b = 1.5
        c = paddle.full([2, 2, 2], 1.5, dtype="float32")
        self.check_operation(a, b, c, '*')
    def test_tensor_div_scalar(self):
        # tensor(int64) / scalar(int)
        a = paddle.ones([2, 2, 2], dtype='int64')
        b = 2
        c = paddle.full([2, 2, 2], 0.5, dtype="float32")
        self.check_operation(a, b, c, '/')
        # tensor(float32) / scalar(int)
        a = paddle.ones([2, 2, 2], dtype='float32')
        b = 2
        c = paddle.full([2, 2, 2], 0.5, dtype="float32")
        self.check_operation(a, b, c, '/')
        # tensor(int64) / scalar(float, .0)
        a = paddle.ones([2, 2, 2], dtype='int64')
        b = 2.0
        c = paddle.full([2, 2, 2], 0.5, dtype="float32")
        self.check_operation(a, b, c, '/')
        # tensor(int64) / scalar(float, .5)
        a = paddle.ones([2, 2, 2], dtype='int64')
        b = 0.5
        c = paddle.full([2, 2, 2], 2, dtype="float32")
        self.check_operation(a, b, c, '/')
        # tensor(float32) / scalar(float)
        a = paddle.ones([2, 2, 2], dtype='float32')
        b = 0.5
        c = paddle.full([2, 2, 2], 2, dtype="float32")
        self.check_operation(a, b, c, '/')
    def test_scalar_div_tensor(self):
        # scalar(int) / tensor(int64)
        a = 1
        b = paddle.full([2, 2, 2], 2, dtype='int64')
        c = paddle.full([2, 2, 2], 0.5, dtype="float32")
        self.check_operation(a, b, c, '/')
        # scalar(int) / tensor(float32)
        a = 1
        b = paddle.full([2, 2, 2], 0.5, dtype='float32')
        c = paddle.full([2, 2, 2], 2, dtype="float32")
        self.check_operation(a, b, c, '/')
        # scalar(float) / tensor(int64)
        a = 1.0
        b = paddle.full([2, 2, 2], 2, dtype='int64')
        c = paddle.full([2, 2, 2], 0.5, dtype="float32")
        self.check_operation(a, b, c, '/')
        # scalar(float) / tensor(float32)
        a = 1.0
        b = paddle.full([2, 2, 2], 0.5, dtype='float32')
        c = paddle.full([2, 2, 2], 2, dtype="float32")
        self.check_operation(a, b, c, '/')
    def test_tensor_pow_scalar(self):
        # tensor(int64) ** scalar(int)
        a = paddle.full([2, 2, 2], 2, dtype='int64')
        b = 3
        c = paddle.full([2, 2, 2], 8, dtype="int64")
        self.check_operation(a, b, c, '**')
        # tensor(int64) ** scalar(float)
        a = paddle.full([2, 2, 2], 2, dtype='int64')
        b = 3.0
        c = paddle.full([2, 2, 2], 8, dtype="float32")
        self.check_operation(a, b, c, '**')
        # tensor(float32) ** scalar(int)
        a = paddle.full([2, 2, 2], 2, dtype='float32')
        b = 3
        c = paddle.full([2, 2, 2], 8, dtype="float32")
        self.check_operation(a, b, c, '**')
        # tensor(float32) ** scalar(float)
        a = paddle.full([2, 2, 2], 2, dtype='float32')
        b = 3.0
        c = paddle.full([2, 2, 2], 8, dtype="float32")
        self.check_operation(a, b, c, '**')
    def test_scalar_pow_tensor(self):
        # scalar(int) ** tensor(int64)
        a = 3
        b = paddle.full([2, 2, 2], 2, dtype='int64')
        c = paddle.full([2, 2, 2], 9, dtype="int64")
        self.check_operation(a, b, c, '**')
        # scalar(float) ** tensor(int64)
        a = 3.0
        b = paddle.full([2, 2, 2], 2, dtype='int64')
        c = paddle.full([2, 2, 2], 9, dtype="float32")
        self.check_operation(a, b, c, '**')
        # scalar(int) ** tensor(float32)
        a = 3
        b = paddle.full([2, 2, 2], 2, dtype='float32')
        c = paddle.full([2, 2, 2], 9, dtype="float32")
        self.check_operation(a, b, c, '**')
        # scalar(float) ** tensor(float32)
        a = 3.0
        b = paddle.full([2, 2, 2], 2, dtype='float32')
        c = paddle.full([2, 2, 2], 9, dtype="float32")
        self.check_operation(a, b, c, '**')
    ## TODO: floordiv op kernel doesn't support float
    def test_tensor_floordiv_scalar(self):
        # tensor(int64) // scalar(int)
        a = paddle.full([2, 2, 2], 3, dtype='int64')
        b = 2
        c = paddle.full([2, 2, 2], 1, dtype="int64")
        self.check_operation(a, b, c, '//')
    def test_tensor_mod_scalar(self):
        # tensor(int64) % scalar(int)
        a = paddle.full([2, 2, 2], 3, dtype='int64')
        b = 2
        c = paddle.full([2, 2, 2], 1, dtype="int64")
        self.check_operation(a, b, c, '%')
        # tensor(int64) % scalar(float)
        a = paddle.full([2, 2, 2], 3, dtype='int64')
        b = 2.0
        c = paddle.full([2, 2, 2], 1, dtype="float32")
        self.check_operation(a, b, c, '%')
        # tensor(float32) % scalar(int)
        a = paddle.full([2, 2, 2], 3, dtype='float32')
        b = 2
        c = paddle.full([2, 2, 2], 1, dtype="float32")
        self.check_operation(a, b, c, '%')
        # tensor(float32) % scalar(float)
        a = paddle.full([2, 2, 2], 3, dtype='float32')
        b = 2.0
        c = paddle.full([2, 2, 2], 1, dtype="float32")
        self.check_operation(a, b, c, '%')
if __name__ == '__main__':
    unittest.main()
 | |
| 
	from __future__ import unicode_literals, division, absolute_import
from builtins import *  # pylint: disable=unused-import, redefined-builtin
import copy
import logging
from math import ceil
from flask import jsonify
from flask import request
from sqlalchemy.orm.exc import NoResultFound
from flexget.api import api, APIResource
from flexget.plugins.list import movie_list as ml
from flexget.utils.tools import split_title_year
log = logging.getLogger('movie_list')
movie_list_api = api.namespace('movie_list', description='Movie List operations')
default_error_schema = {
    'type': 'object',
    'properties': {
        'status': {'type': 'string'},
        'message': {'type': 'string'}
    }
}
empty_response = api.schema('empty', {'type': 'object'})
default_error_schema = api.schema('default_error_schema', default_error_schema)
empty_response = api.schema('empty_response', empty_response)
allowed_ids = ml.SUPPORTED_IDS
input_movie_list_id_object = {
    'type': 'array',
    'items': {
        'type': 'object',
        'minProperties': 1,
        'additionalProperties': True
    }
}
input_movie_entry = {
    'type': 'object',
    'properties': {
        'title': {'type': 'string'},
        'original_url': {'type': 'string'},
        'movie_name': {'type': 'string'},
        'movie_year': {'type': 'integer'},
        'movie_identifiers': input_movie_list_id_object
    },
    'additionalProperties': True,
    'required': ['original_url'],
    'anyOf': [
        {'required': ['title']},
        {'required': ['movie_name', 'movie_year']}
    ]
}
return_movie_list_id_object = copy.deepcopy(input_movie_list_id_object)
return_movie_list_id_object.update(
    {'properties': {
        'id': {'type': 'integer'},
        'added_on': {'type': 'string'},
        'movie_id': {'type': 'integer'}
    }})
movie_list_object = {
    'type': 'object',
    'properties': {
        'title': {'type': 'string'},
        'added_on': {'type': 'string'},
        'year': {'type': 'integer'},
        'list_id': {'type': 'integer'},
        'movie_list_ids': {
            'type': 'array',
            'items': return_movie_list_id_object
        },
    }
}
list_object = {
    'type': 'object',
    'properties': {
        'id': {'type': 'integer'},
        'added_on': {'type': 'string'},
        'name': {'type': 'string'}
    }
}
list_input = copy.deepcopy(list_object)
del list_input['properties']['id']
del list_input['properties']['added_on']
return_movies = {
    'type': 'object',
    'properties': {
        'movies': {
            'type': 'array',
            'items': movie_list_object
        },
        'number_of_movies': {'type': 'integer'},
        'total_number_of_movies': {'type': 'integer'},
        'page_number': {'type': 'integer'}
    }
}
return_lists = {'type': 'array', 'items': list_object}
input_movie_entry_schema = api.schema('input_movie_entry', input_movie_entry)
input_movie_list_id_schema = api.schema('input_movie_list_id_object', input_movie_list_id_object)
movie_list_id_object_schema = api.schema('movie_list_id_object', return_movie_list_id_object)
movie_list_object_schema = api.schema('movie_list_object', movie_list_object)
list_object_schema = api.schema('list_object', list_object)
return_lists_schema = api.schema('return_lists', return_lists)
return_movies_schema = api.schema('return_movies', return_movies)
new_list_schema = api.schema('new_list', list_input)
movie_list_parser = api.parser()
movie_list_parser.add_argument('name', help='Filter results by list name')
@movie_list_api.route('/')
class MovieListAPI(APIResource):
    @api.response(200, model=return_lists_schema)
    @api.doc(parser=movie_list_parser)
    def get(self, session=None):
        """ Gets movies lists """
        args = movie_list_parser.parse_args()
        name = args.get('name')
        movie_lists = [movie_list.to_dict() for movie_list in ml.get_movie_lists(name=name, session=session)]
        return jsonify({'movie_lists': movie_lists})
    @api.validate(new_list_schema)
    @api.response(201, model=list_object_schema)
    @api.response(500, description='List already exists', model=default_error_schema)
    def post(self, session=None):
        """ Create a new list """
        data = request.json
        name = data.get('name')
        try:
            movie_list = ml.get_list_by_exact_name(name=name, session=session)
        except NoResultFound:
            movie_list = None
        if movie_list:
            return {'status': 'error',
                    'message': "list with name '%s' already exists" % name}, 500
        movie_list = ml.MovieListList(name=name)
        session.add(movie_list)
        session.commit()
        resp = jsonify(movie_list.to_dict())
        resp.status_code = 201
        return resp
@movie_list_api.route('/<int:list_id>/')
@api.doc(params={'list_id': 'ID of the list'})
class MovieListListAPI(APIResource):
    @api.response(404, model=default_error_schema)
    @api.response(200, model=list_object_schema)
    def get(self, list_id, session=None):
        """ Get list by ID """
        try:
            movie_list = ml.get_list_by_id(list_id=list_id, session=session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'list_id %d does not exist' % list_id}, 404
        return jsonify(movie_list.to_dict())
    @api.response(200, model=empty_response)
    @api.response(404, model=default_error_schema)
    def delete(self, list_id, session=None):
        """ Delete list by ID """
        try:
            movie_list = ml.get_list_by_id(list_id=list_id, session=session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'list_id %d does not exist' % list_id}, 404
        session.delete(movie_list)
        return {}
movie_identifiers_doc = "Use a movie identifier in the following format:\n[{'ID_NAME': 'ID_VALUE'}]." \
                        " ID_NAME has to be one of %s" % ", ".join(allowed_ids)
movies_parser = api.parser()
movies_parser.add_argument('sort_by', choices=('id', 'added', 'title', 'year'), default='title',
                           help='Sort by attribute')
movies_parser.add_argument('order', choices=('desc', 'asc'), default='desc', help='Sorting order')
movies_parser.add_argument('page', type=int, default=1, help='Page number')
movies_parser.add_argument('page_size', type=int, default=10, help='Number of movies per page')
@movie_list_api.route('/<int:list_id>/movies/')
class MovieListMoviesAPI(APIResource):
    @api.response(404, model=default_error_schema)
    @api.response(200, model=return_movies_schema)
    @api.doc(params={'list_id': 'ID of the list'}, parser=movies_parser)
    def get(self, list_id, session=None):
        """ Get movies by list ID """
        args = movies_parser.parse_args()
        page = args.get('page')
        page_size = args.get('page_size')
        start = page_size * (page - 1)
        stop = start + page_size
        if args.get('order') == 'desc':
            descending = True
        else:
            descending = False
        kwargs = {
            'start': start,
            'stop': stop,
            'list_id': list_id,
            'order_by': args.get('sort_by'),
            'descending': descending,
            'session': session
        }
        try:
            movie_list = ml.get_list_by_id(list_id=list_id, session=session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'list_id %d does not exist' % list_id}, 404
        count = ml.get_movies_by_list_id(count=True, **kwargs)
        movies = [movie.to_dict() for movie in ml.get_movies_by_list_id(**kwargs)]
        pages = int(ceil(count / float(page_size)))
        number_of_movies = min(page_size, count)
        return jsonify({'movies': movies,
                        'number_of_movies': number_of_movies,
                        'total_number_of_movies': count,
                        'page': page,
                        'total_number_of_pages': pages})
    @api.validate(model=input_movie_entry_schema, description=movie_identifiers_doc)
    @api.response(201, model=movie_list_object_schema)
    @api.response(404, description='List not found', model=default_error_schema)
    @api.response(500, description='Movie already exists in list', model=default_error_schema)
    @api.response(501, description='Movie identifier not allowed', model=default_error_schema)
    def post(self, list_id, session=None):
        """ Add movies to list by ID """
        try:
            movie_list = ml.get_list_by_id(list_id=list_id, session=session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'list_id %d does not exist' % list_id}, 404
        data = request.json
        # Validates ID type based on allowed ID
        # TODO pass this to json schema validation
        for id_name in data.get('movie_identifiers'):
            if set(id_name.keys()) & set(allowed_ids) == set([]):
                return {'status': 'error',
                        'message': 'movie identifier %s is not allowed' % id_name}, 501
        if 'movie_name' in data:
            title, year = data['movie_name'], data.get('movie_year')
        else:
            title, year = split_title_year(data['title'])
        movie = ml.get_movie_by_title(list_id=list_id, title=title, session=session)
        if movie:
            return {'status': 'error',
                    'message': 'movie with name "%s" already exists in list %d' % (title, list_id)}, 500
        movie = ml.MovieListMovie()
        movie.title = title
        movie.year = year
        movie.ids = ml.get_db_movie_identifiers(identifier_list=data.get('movie_identifiers'), session=session)
        movie.list_id = list_id
        session.add(movie)
        session.commit()
        response = jsonify({'movie': movie.to_dict()})
        response.status_code = 201
        return response
@movie_list_api.route('/<int:list_id>/movies/<int:movie_id>/')
@api.doc(params={'list_id': 'ID of the list', 'movie_id': 'ID of the movie'})
@api.response(404, description='List or movie not found', model=default_error_schema)
class MovieListMovieAPI(APIResource):
    @api.response(200, model=movie_list_object_schema)
    def get(self, list_id, movie_id, session=None):
        """ Get a movie by list ID and movie ID """
        try:
            movie = ml.get_movie_by_id(list_id=list_id, movie_id=movie_id, session=session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'could not find movie with id %d in list %d' % (movie_id, list_id)}, 404
        return jsonify(movie.to_dict())
    @api.response(200, model=empty_response)
    def delete(self, list_id, movie_id, session=None):
        """ Delete a movie by list ID and movie ID """
        try:
            movie = ml.get_movie_by_id(list_id=list_id, movie_id=movie_id, session=session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'could not find movie with id %d in list %d' % (movie_id, list_id)}, 404
        log.debug('deleting movie %d', movie.id)
        session.delete(movie)
        return {}
    @api.validate(model=input_movie_list_id_schema, description=movie_identifiers_doc)
    @api.response(200, model=movie_list_object_schema)
    @api.response(501, description='Movie identifier not allowed', model=default_error_schema)
    @api.doc(description='Sent movie identifiers will override any existing identifiers that the movie currently holds')
    def put(self, list_id, movie_id, session=None):
        """ Sets movie identifiers """
        try:
            movie = ml.get_movie_by_id(list_id=list_id, movie_id=movie_id, session=session)
        except NoResultFound:
            return {'status': 'error',
                    'message': 'could not find movie with id %d in list %d' % (movie_id, list_id)}, 404
        data = request.json
        # Validates ID type based on allowed ID
        # TODO pass this to json schema validation
        for id_name in data:
            if set(id_name.keys()) & set(allowed_ids) == set([]):
                return {'status': 'error',
                        'message': 'movie identifier %s is not allowed' % id_name}, 501
        movie.ids[:] = ml.get_db_movie_identifiers(identifier_list=data, movie_id=movie_id, session=session)
        session.commit()
        return jsonify(movie.to_dict())
 | |
| 
	# coding: utf-8
""" Key value store interface of MXNet for parameter synchronization."""
from __future__ import absolute_import
import ctypes
import pickle
from .ndarray import NDArray
from .base import _LIB
from .base import check_call, c_array, c_str, string_types, mx_uint, py_str
from .base import NDArrayHandle, KVStoreHandle
from . import optimizer as opt
def _ctype_key_value(keys, vals):
    """
    Returns ctype arrays for the key-value args. For internal use.
    """
    if isinstance(keys, int):
        if isinstance(vals, NDArray):
            return (c_array(ctypes.c_int, [keys]),
                    c_array(NDArrayHandle, [vals.handle]))
        else:
            for value in vals:
                assert(isinstance(value, NDArray))
            return (c_array(ctypes.c_int, [keys] * len(vals)),
                    c_array(NDArrayHandle, [value.handle for value in vals]))
    else:
        assert(len(keys) == len(vals))
        for k in keys:
            assert(isinstance(k, int))
        c_keys = []
        c_vals = []
        for key, val in zip(keys, vals):
            c_key_i, c_val_i = _ctype_key_value(key, val)
            c_keys += c_key_i
            c_vals += c_val_i
        return (c_array(ctypes.c_int, c_keys), c_array(NDArrayHandle, c_vals))
def _updater_wrapper(updater):
    """A wrapper for the user-defined handle."""
    def updater_handle(key, lhs_handle, rhs_handle, _):
        """ ctypes function """
        lhs = NDArray(NDArrayHandle(lhs_handle))
        rhs = NDArray(NDArrayHandle(rhs_handle))
        updater(key, lhs, rhs)
    return updater_handle
class KVStore(object):
    """A key-value store for synchronization of values, over multiple devices."""
    def __init__(self, handle):
        """Initializes a new KVStore.
        Parameters
        ----------
        handle : KVStoreHandle
            `KVStore` handle of C API.
        """
        assert isinstance(handle, KVStoreHandle)
        self.handle = handle
        self._updater = None
        self._updater_func = None
    def __del__(self):
        check_call(_LIB.MXKVStoreFree(self.handle))
    def init(self, key, value):
        """ Initializes a single or a sequence of key-value pairs into the store.
        For each key, one must `init` it before calling `push` or `pull`.
        When multiple workers invoke `init` for the same key, only
        the value supplied by worker with rank `0` is used. This function returns
        after data has been initialized successfully.
        Parameters
        ----------
        key : int or sequence of int
            The keys.
        value : NDArray or sequence of NDArray
            Values corresponding to the keys.
        Examples
        --------
        >>> # init a single key-value pair
        >>> shape = (2,3)
        >>> kv = mx.kv.create('local')
        >>> kv.init(3, mx.nd.ones(shape)*2)
        >>> a = mx.nd.zeros(shape)
        >>> kv.pull(3, out=a)
        >>> print a.asnumpy()
        [[ 2.  2.  2.]
        [ 2.  2.  2.]]
        >>> # init a list of key-value pairs
        >>> keys = [5, 7, 9]
        >>> kv.init(keys, [mx.nd.ones(shape)]*len(keys))
        """
        ckeys, cvals = _ctype_key_value(key, value)
        check_call(_LIB.MXKVStoreInit(
            self.handle, mx_uint(len(ckeys)), ckeys, cvals))
    def push(self, key, value, priority=0):
        """ Pushes a single or a sequence of key-value pairs into the store.
        This function returns immediately after adding an operator to the engine.
        The actual operation is executed asynchronously after all previous `push`
        and `pull` calls for the same input key(s) are finished.
        There is no synchronization between workers. One can use ``_barrier()``
        to sync all workers.
        Parameters
        ----------
        key : int or list of int
            Keys.
        value : NDArray or list of NDArray or list of list of NDArray
            Values corresponding to the keys.
        priority : int, optional
            The priority of the push operation.
            Higher priority push operations are likely to be executed before
            other push actions.
        Examples
        --------
        >>> # push a single key-value pair
        >>> kv.push(3, mx.nd.ones(shape)*8)
        >>> kv.pull(3, out=a) # pull out the value
        >>> print a.asnumpy()
        [[ 8.  8.  8.]
        [ 8.  8.  8.]]
        >>> # aggregate the value and the push
        >>> gpus = [mx.gpu(i) for i in range(4)]
        >>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]
        >>> kv.push(3, b)
        >>> kv.pull(3, out=a)
        >>> print a.asnumpy()
        [[ 4.  4.  4.]
        [ 4.  4.  4.]]
        >>> # push a list of keys.
        >>> # single device
        >>> kv.push(keys, [mx.nd.ones(shape)]*len(keys))
        >>> b = [mx.nd.zeros(shape)]*len(keys)
        >>> kv.pull(keys, out=b)
        >>> print b[1].asnumpy()
        [[ 1.  1.  1.]
        [ 1.  1.  1.]]
        >>> # multiple devices:
        >>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)
        >>> kv.push(keys, b)
        >>> kv.pull(keys, out=b)
        >>> print b[1][1].asnumpy()
        [[ 4.  4.  4.]
        [ 4.  4.  4.]]
        """
        ckeys, cvals = _ctype_key_value(key, value)
        check_call(_LIB.MXKVStorePush(
            self.handle, mx_uint(len(ckeys)), ckeys, cvals,
            ctypes.c_int(priority)))
    def pull(self, key, out=None, priority=0):
        """ Pulls a single value or a sequence of values from the store.
        This function returns immediately after adding an operator to the engine.
        Subsequent attempts to read from the `out` variable will be blocked until the
        pull operation completes.
        `pull` is executed asynchronously after all previous `push` and `pull` calls
        for the same input key(s) are finished.
        The returned values are guaranteed to be the latest values in the store.
        Parameters
        ----------
        key : int or list of int
            Keys.
        out: NDArray or list of NDArray or list of list of NDArray
            Values corresponding to the keys.
        priority : int, optional
            The priority of the pull operation.
            Higher priority pull operations are likely to be executed before
            other pull actions.
        Examples
        --------
        >>> # pull a single key-value pair
        >>> a = mx.nd.zeros(shape)
        >>> kv.pull(3, out=a)
        >>> print a.asnumpy()
        [[ 2.  2.  2.]
        [ 2.  2.  2.]]
        >>> # pull into multiple devices
        >>> b = [mx.nd.ones(shape, gpu) for gpu in gpus]
        >>> kv.pull(3, out=b)
        >>> print b[1].asnumpy()
        [[ 2.  2.  2.]
        [ 2.  2.  2.]]
        >>> # pull a list of key-value pairs.
        >>> # On single device
        >>> keys = [5, 7, 9]
        >>> b = [mx.nd.zeros(shape)]*len(keys)
        >>> kv.pull(keys, out=b)
        >>> print b[1].asnumpy()
        [[ 2.  2.  2.]
        [ 2.  2.  2.]]
        >>> # On multiple devices
        >>> b = [[mx.nd.ones(shape, gpu) for gpu in gpus]] * len(keys)
        >>> kv.pull(keys, out=b)
        >>> print b[1][1].asnumpy()
        [[ 2.  2.  2.]
        [ 2.  2.  2.]]
        """
        assert(out is not None)
        ckeys, cvals = _ctype_key_value(key, out)
        check_call(_LIB.MXKVStorePull(
            self.handle, mx_uint(len(ckeys)), ckeys, cvals,
            ctypes.c_int(priority)))
    def set_optimizer(self, optimizer):
        """ Registers an optimizer with the kvstore.
        When using a single machine, this function updates the local optimizer.
        If using multiple machines and this operation is invoked from a worker node,
        it will serialize the optimizer with pickle and send it to all servers.
        The function returns after all servers have been updated.
        Parameters
        ----------
        optimizer : Optimizer
            The new optimizer for the store
        Examples
        --------
        >>> kv = mx.kv.create()
        >>> shape = (2, 2)
        >>> weight = mx.nd.zeros(shape)
        >>> kv.init(3, weight)
        >>> # set the optimizer for kvstore as the default SGD optimizer
        >>> kv.set_optimizer(mx.optimizer.SGD())
        >>> grad = mx.nd.ones(shape)
        >>> kv.push(3, grad)
        >>> kv.pull(3, out = weight)
        >>> # weight is updated via gradient descent
        >>> weight.asnumpy()
        array([[-0.01, -0.01],
               [-0.01, -0.01]], dtype=float32)
        """
        is_worker = ctypes.c_int()
        check_call(_LIB.MXKVStoreIsWorkerNode(ctypes.byref(is_worker)))
        # pylint: disable=invalid-name
        if 'dist' in self.type and is_worker.value:
            # send the optimizer to server
            # use ASCII protocol 0; might be slower, but not a big deal
            optim_str = pickle.dumps(optimizer, 0)
            self._send_command_to_servers(0, optim_str)
        else:
            self._set_updater(opt.get_updater(optimizer))
    @property
    def type(self):
        """ Returns the type of this kvstore.
        Returns
        -------
        type : str
            the string type
        """
        kv_type = ctypes.c_char_p()
        check_call(_LIB.MXKVStoreGetType(self.handle, ctypes.byref(kv_type)))
        return py_str(kv_type.value)
    @property
    def rank(self):
        """ Returns the rank of this worker node.
        Returns
        -------
        rank : int
            The rank of this node, which is in range [0, num_workers())
        """
        rank = ctypes.c_int()
        check_call(_LIB.MXKVStoreGetRank(self.handle, ctypes.byref(rank)))
        return rank.value
    @property
    def num_workers(self):
        """Returns the number of worker nodes.
        Returns
        -------
        size :int
            The number of worker nodes.
        """
        size = ctypes.c_int()
        check_call(_LIB.MXKVStoreGetGroupSize(self.handle, ctypes.byref(size)))
        return size.value
    def save_optimizer_states(self, fname):
        """Saves the optimizer (updater) state to a file. This is often used when checkpointing
        the model during training.
        Parameters
        ----------
        fname : str
            Path to the output states file.
        """
        assert self._updater is not None, "Cannot save states for distributed training"
        with open(fname, 'wb') as fout:
            fout.write(self._updater.get_states())
    def load_optimizer_states(self, fname):
        """Loads the optimizer (updater) state from the file.
        Parameters
        ----------
        fname : str
            Path to input states file.
        """
        assert self._updater is not None, "Cannot save states for distributed training"
        with open(fname, 'rb') as fin:
            self._updater.set_states(fin.read())
    def _set_updater(self, updater):
        """Sets a push updater into the store.
        This function only changes the local store. When running on multiple machines one must
        use `set_optimizer`.
        Parameters
        ----------
        updater : function
            The updater function.
        Examples
        --------
        >>> def update(key, input, stored):
        ...     print "update on key: %d" % key
        ...     stored += input * 2
        >>> kv._set_updater(update)
        >>> kv.pull(3, out=a)
        >>> print a.asnumpy()
        [[ 4.  4.  4.]
        [ 4.  4.  4.]]
        >>> kv.push(3, mx.nd.ones(shape))
        update on key: 3
        >>> kv.pull(3, out=a)
        >>> print a.asnumpy()
        [[ 6.  6.  6.]
        [ 6.  6.  6.]]
        """
        self._updater = updater
        _updater_proto = ctypes.CFUNCTYPE(
            None, ctypes.c_int, NDArrayHandle, NDArrayHandle, ctypes.c_void_p)
        self._updater_func = _updater_proto(_updater_wrapper(updater))
        check_call(_LIB.MXKVStoreSetUpdater(self.handle, self._updater_func, None))
    def _barrier(self):
        """Invokes global barrier among all worker nodes.
        For example, assume there are `n` machines. We would like machine `0` to first
        `init` the values and then have all the workers `pull` the initialized value.
        Before pulling, we can invoke `_barrier()` to guarantee that the
        initialization is finished.
        """
        check_call(_LIB.MXKVStoreBarrier(self.handle))
    def _send_command_to_servers(self, head, body):
        """Sends a command to all server nodes.
        Sending command to a server node will cause that server node to invoke
        ``KVStoreServer.controller`` to execute the command.
        This function returns after the command has been executed on all server
        nodes.
        Parameters
        ----------
        head : int
            the head of the command.
        body : str
            the body of the command.
        """
        check_call(_LIB.MXKVStoreSendCommmandToServers(
            self.handle, mx_uint(head), c_str(body)))
def create(name='local'):
    """Creates a new KVStore.
    For single machine training, there are two commonly used types:
    ``local``: Copies all gradients to CPU memory and updates weights there.
    ``device``: Aggregates gradients and updates weights on GPUs. With this setting,
    the KVStore also attempts to use GPU peer-to-peer communication,
    potentially accelerating the communication.
    For distributed training, KVStore also supports a number of types:
    ``dist_sync``: Behaves similarly to ``local`` but with one major difference.
    With ``dist_sync``, batch-size now means the batch size used on each machine.
    So if there are ``n`` machines and we use batch size ``b``,
    then ``dist_sync`` behaves like ``local`` with batch size ``n * b``.
    ``dist_device_sync``: Identical to ``dist_sync`` with the difference similar
    to ``device`` vs ``local``.
    ``dist_async``: Performs asynchronous updates.
    The weights are updated whenever gradients are received from any machine.
    No two updates happen on the same weight at the same time. However, the order is not
    guaranteed.
    Parameters
    ----------
    name : {'local', 'device', 'dist_sync', 'dist_device_sync', 'dist_async'}
        The type of KVStore.
    Returns
    -------
    kv : KVStore
        The created KVStore.
    """
    if not isinstance(name, string_types):
        raise TypeError('name must be a string')
    handle = KVStoreHandle()
    check_call(_LIB.MXKVStoreCreate(c_str(name),
                                    ctypes.byref(handle)))
    return KVStore(handle)
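# Minimal usage sketch (assuming an MXNet build with KVStore support; mirrors
# the docstring examples above):
#   import mxnet as mx
#   kv = mx.kv.create('local')
#   kv.init(3, mx.nd.ones((2, 3)) * 2)
#   out = mx.nd.zeros((2, 3))
#   kv.pull(3, out=out)   # out is now all 2s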
 | |
| 
	# -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
#$URL$
#$Date$
#$Revision$
import shlex
from flowables import *
import rst2pdf.flowables
from styles import adjustUnits
from log import log, nodeid
def parseRaw(data, node):
    """Parse and process a simple DSL to handle creation of flowables.
    Supported (can add others on request):
    * PageBreak
    * Spacer width, height
    """
    elements = []
    lines = data.splitlines()
    for line in lines:
        lexer = shlex.shlex(line)
        lexer.whitespace += ','
        tokens = list(lexer)
        if not tokens:
            continue # Empty line
        command = tokens[0]
        if command == 'PageBreak':
            if len(tokens) == 1:
                elements.append(MyPageBreak())
            else:
                elements.append(MyPageBreak(tokens[1]))
        elif command == 'EvenPageBreak':
            if len(tokens) == 1:
                elements.append(MyPageBreak(breakTo='even'))
            else:
                elements.append(MyPageBreak(tokens[1],breakTo='even'))
        elif command == 'OddPageBreak':
            if len(tokens) == 1:
                elements.append(MyPageBreak(breakTo='odd'))
            else:
                elements.append(MyPageBreak(tokens[1],breakTo='odd'))
        elif command == 'FrameBreak':
            if len(tokens) == 1:
                elements.append(CondPageBreak(99999))
            else:
                elements.append(CondPageBreak(float(tokens[1])))
        elif command == 'Spacer':
            elements.append(MySpacer(adjustUnits(tokens[1]),
                adjustUnits(tokens[2])))
        elif command == 'Transition':
            elements.append(Transition(*tokens[1:]))
        elif command == 'SetPageCounter':
            elements.append(rst2pdf.flowables.PageCounter(*tokens[1:]))
        else:
            log.error('Unknown command %s in raw pdf directive [%s]'%(command,nodeid(node)))
    return elements
from reportlab.lib.colors import Color, CMYKColor, getAllNamedColors, toColor, \
    HexColor
HAS_XHTML2PDF = True
try:
    from xhtml2pdf.util import COLOR_BY_NAME
    from xhtml2pdf.util import memoized
    from xhtml2pdf.context import pisaContext
    from xhtml2pdf.default import DEFAULT_CSS
    from xhtml2pdf.parser import pisaParser,pisaGetAttributes
    from xhtml2pdf.document import pisaStory
    from reportlab.platypus.flowables import Spacer
    from reportlab.platypus.frames import Frame
    from xhtml2pdf.xhtml2pdf_reportlab import PmlBaseDoc, PmlPageTemplate
    from xhtml2pdf.util import pisaTempFile, getBox, pyPdf
    import xhtml2pdf.parser as pisa_parser
except ImportError:
    try:
        from sx.pisa3.pisa_util import COLOR_BY_NAME
        memoized = lambda f: f  # no-op fallback when no memoization helper is available
        from sx.pisa3.pisa_context import pisaContext
        from sx.pisa3.pisa_default import DEFAULT_CSS
        from sx.pisa3.pisa_parser import pisaParser,pisaGetAttributes
        from sx.pisa3.pisa_document import pisaStory
        from reportlab.platypus.flowables import Spacer
        from reportlab.platypus.frames import Frame
        from sx.pisa3.pisa_reportlab import PmlBaseDoc, PmlPageTemplate
        from sx.pisa3.pisa_util import pisaTempFile, getBox, pyPdf
        import sx.pisa3.pisa_parser as pisa_parser
    except ImportError:
        HAS_XHTML2PDF = False
if HAS_XHTML2PDF:
    import re
    # ``rgb_re`` is used below but not otherwise defined or imported in this
    # module; this pattern for CSS ``rgb(r, g, b)`` values is an assumption.
    rgb_re = re.compile(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)")
    COLOR_BY_NAME['initial'] = Color(0, 0, 0)
    @memoized
    def getColor2(value, default=None):
        """
        Convert to color value.
        This returns a Color object instance from a text bit.
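        Illustrative calls (a sketch; results omitted)::
            getColor2("#f00")                  # short hex form, expanded to #ff0000
            getColor2("red")                   # looked up in COLOR_BY_NAME
            getColor2("none", default=None)    # returns the supplied default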
        """
        if isinstance(value, Color):
            return value
        value = str(value).strip().lower()
        if value == "transparent" or value == "none":
            return default
        if value in COLOR_BY_NAME:
            return COLOR_BY_NAME[value]
        if value.startswith("#") and len(value) == 4:
            value = "#" + value[1] + value[1] + value[2] + value[2] + value[3] + value[3]
        elif rgb_re.search(value):
            # e.g., value = "<css function: rgb(153, 51, 153)>", go figure:
            r, g, b = [int(x) for x in rgb_re.search(value).groups()]
            value = "#%02x%02x%02x" % (r, g, b)
        else:
            # Shrug
            pass
        return toColor(value, default) # Calling the reportlab function
    #import xhtml2pdf.util
    #xhtml2pdf.util.getColor = getColor2
    
    import cgi
    import logging
    from xml.dom import Node
    def pisaPreLoop2(node, context, collect=False):
        """
        Collect all CSS definitions
        """
        data = u""
        if node.nodeType == Node.TEXT_NODE and collect:
            data = node.data
        elif node.nodeType == Node.ELEMENT_NODE:
            name = node.tagName.lower()
            # print name, node.attributes.items()
            if name in ("style", "link"):
                attr = pisaGetAttributes(context, name, node.attributes)
                print " ", attr
                media = [x.strip() for x in attr.media.lower().split(",") if x.strip()]
                # print repr(media)
                if (attr.get("type", "").lower() in ("", "text/css") and (
                    not media or
                    "all" in media or
                    "print" in media or
                    "pdf" in media)):
                    if name == "style":
                        for node in node.childNodes:
                            data += pisaPreLoop2(node, context, collect=True)
                        #context.addCSS(data)
                        return u""
                        #collect = True
                    if name == "link" and attr.href and attr.rel.lower() == "stylesheet":
                        # print "CSS LINK", attr
                        context.addCSS('\n@import "%s" %s;' % (attr.href, ",".join(media)))
                        # context.addCSS(unicode(file(attr.href, "rb").read(), attr.charset))
        #else:
        #    print node.nodeType
        for node in node.childNodes:
            result = pisaPreLoop2(node, context, collect=collect)
            if collect:
                data += result
        return data
    pisa_parser.pisaPreLoop = pisaPreLoop2
        
    HTML_CSS = """
    html {
        font-family: Helvetica;
        font-size: 7px;
        font-weight: normal;
        color: #000000;
        background-color: transparent;
        margin: 0;
        padding: 0;
        line-height: 150%;
        border: 1px none;
        display: inline;
        width: auto;
        height: auto;
        white-space: normal;
    }
    b,
    strong {
        font-weight: bold;
    }
    i,
    em {
        font-style: italic;
    }
    u {
        text-decoration: underline;
    }
    s,
    strike {
        text-decoration: line-through;
    }
    a {
        text-decoration: underline;
        color: blue;
    }
    ins {
        color: green;
        text-decoration: underline;
    }
    del {
        color: red;
        text-decoration: line-through;
    }
    pre,
    code,
    kbd,
    samp,
    tt {
        font-family: "Courier New";
    }
    h1,
    h2,
    h3,
    h4,
    h5,
    h6 {
        font-weight:bold;
        -pdf-outline: true;
        -pdf-outline-open: false;
    }
    h1 {
        /*18px via YUI Fonts CSS foundation*/
        font-size:138.5%;
        -pdf-outline-level: 0;
    }
    h2 {
        /*16px via YUI Fonts CSS foundation*/
        font-size:123.1%;
        -pdf-outline-level: 1;
    }
    h3 {
        /*14px via YUI Fonts CSS foundation*/
        font-size:108%;
        -pdf-outline-level: 2;
    }
    h4 {
        -pdf-outline-level: 3;
    }
    h5 {
        -pdf-outline-level: 4;
    }
    h6 {
        -pdf-outline-level: 5;
    }
    h1,
    h2,
    h3,
    h4,
    h5,
    h6,
    p,
    pre,
    hr {
        margin:1em 0;
    }
    address,
    blockquote,
    body,
    center,
    dl,
    dir,
    div,
    fieldset,
    form,
    h1,
    h2,
    h3,
    h4,
    h5,
    h6,
    hr,
    isindex,
    menu,
    noframes,
    noscript,
    ol,
    p,
    pre,
    table,
    th,
    tr,
    td,
    ul,
    li,
    dd,
    dt,
    pdftoc {
        display: block;
    }
    table {
    }
    tr,
    th,
    td {
        vertical-align: middle;
        width: auto;
    }
    th {
        text-align: center;
        font-weight: bold;
    }
    center {
        text-align: center;
    }
    big {
        font-size: 125%;
    }
    small {
        font-size: 75%;
    }
    ul {
        margin-left: 1.5em;
        list-style-type: disc;
    }
    ul ul {
        list-style-type: circle;
    }
    ul ul ul {
        list-style-type: square;
    }
    ol {
        list-style-type: decimal;
        margin-left: 1.5em;
    }
    pre {
        white-space: pre;
    }
    blockquote {
        margin-left: 1.5em;
        margin-right: 1.5em;
    }
    noscript {
        display: none;
    }
    """
    def parseHTML(data, node):
        dest=None 
        path=None 
        link_callback=None 
        debug=0
        default_css=HTML_CSS
        xhtml=False
        encoding=None
        xml_output=None
        raise_exception=True
        capacity=100*1024
        # Prepare simple context
        context = pisaContext(path, debug=debug, capacity=capacity)
        context.pathCallback = link_callback
        # Build story
        context = pisaStory(data, path, link_callback, debug, default_css, xhtml,
                            encoding, context=context, xml_output=xml_output)
        return context.story
else: # no xhtml2pdf
    def parseHTML(data, node):
        log.error("You need xhtml2pdf installed to use the raw HTML directive.")
        return []
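# Illustrative usage of parseHTML (a sketch; assumes ``data`` holds an HTML
# string and ``node`` is the originating docutils node; the return value is a
# list of ReportLab flowables, or [] when xhtml2pdf is not installed):
#
#     story = parseHTML("<p>Hello <b>world</b></p>", node)
#     elements.extend(story)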
 | |
| 
	from pytest import raises
from mio.errors import AttributeError, TypeError
def test_basic_trait(mio, capfd):
    mio.eval("""
        TGreeting = Trait clone() do (
            hello = method(
                print("Hello", self getGreeting())
            )
        )
        World = Object clone() do (
            use(TGreeting)
            greeting = "World!"
            getGreeting = method(
               self greeting
            )
            setGreeting = method(aGreeting,
                self greeting = aGreeting
            )
        )
    """)
    mio.eval("World hello()")
    out, err = capfd.readouterr()
    assert out == "Hello World!\n"
    mio.eval("World setGreeting(\"John\")")
    mio.eval("World hello()")
    out, err = capfd.readouterr()
    assert out == "Hello John\n"
def test_basic_trait2(mio, capfd):
    mio.eval("""
        TGreeting = Trait clone() do (
            hello = method(
                print("Hello", self getGreeting())
            )
        )
        World = Object clone() do (
            use(TGreeting)
            greeting = "World!"
            getGreeting = method(
               self greeting
            )
            setGreeting = method(aGreeting,
                self greeting = aGreeting
            )
        )
    """)
    mio.eval("World hello()")
    out, err = capfd.readouterr()
    assert out == "Hello World!\n"
    mio.eval("World setGreeting(\"John\")")
    mio.eval("World hello()")
    out, err = capfd.readouterr()
    assert out == "Hello John\n"
    with raises(TypeError):
        mio.eval("World use(TGreeting)", reraise=True)
def test_invalid(mio):
    mio.eval("TGreetable = Object clone()")
    with raises(TypeError):
        mio.eval("Object clone() use(TGreetable)", reraise=True)
def test_state(mio):
    mio.eval("TGreetable = Trait clone()")
    with raises(TypeError):
        mio.eval("TGreetable greeting = \"World!\"", reraise=True)
def test_requirements(mio):
    mio.eval("""
        TGreetable = Trait clone() do(
            requires("greeting")
        )
    """)
    mio.eval("TGreetable requirements()") == [u"greeting"]
def test_requires(mio):
    mio.eval("""
        TGreetable = Trait clone() do(
            requires("greeting")
        )
    """)
    with raises(TypeError):
        mio.eval("Object clone() use(TGreetable)", reraise=True)
def test_resolution(mio):
    mio.eval("""
        TFoo = Trait clone() do(
            foo = method("foo")
        )
        TBar = Trait clone() do(
            foo = method("foo")
        )
    """)
    with raises(TypeError):
        mio.eval("Object clone() use(TFoo) use(TBar)", reraise=True)
def test_resolution2(mio):
    mio.eval("""
        TFoo = Trait clone() do(
            foo = method("foo")
        )
        TBar = Trait clone() do(
            foo = method("foo")
        )
    """)
    mio.eval("Foo = Object clone() use(TFoo) use(TBar, {\"foo\": \"bar\"})")
    assert mio.eval("Foo hasTrait(TFoo)")
    assert mio.eval("Foo hasTrait(TBar)")
    assert set(mio.eval("Foo behaviors")) == set([u"foo", u"bar"])
def test_resolution_deltrait(mio):
    mio.eval("""
        TFoo = Trait clone() do(
            foo = method("foo")
        )
        TBar = Trait clone() do(
            foo = method("foo")
        )
    """)
    mio.eval("Foo = Object clone() use(TFoo) use(TBar, {\"foo\": \"bar\"})")
    assert mio.eval("Foo hasTrait(TFoo)")
    assert mio.eval("Foo hasTrait(TBar)")
    assert set(mio.eval("Foo behaviors")) == set([u"foo", u"bar"])
    mio.eval("Foo delTrait(TFoo)")
    assert not mio.eval("Foo hasTrait(TFoo)")
    assert mio.eval("Foo behaviors") == ["bar"]
    mio.eval("Foo delTrait(TBar)")
    assert not mio.eval("Foo hasTrait(TBar)")
    assert mio.eval("Foo behaviors") == []
def test_adapt(mio):
    mio.eval("TGreetable = Trait clone()")
    assert mio.eval(
        "World = Object clone() adapt(TGreetable) hasTrait(TGreetable)")
def test_hasTrait(mio):
    mio.eval("""
        TGreetable = Trait clone()
        World = Object clone() do (
            use(TGreetable)
        )
    """)
    assert mio.eval("World hasTrait(TGreetable)")
def test_hasTrait2(mio):
    mio.eval("""
        TGreetable = Trait clone()
        World = Object clone() do (
            use(TGreetable)
        )
    """)
    assert mio.eval("World hasTrait(TGreetable)")
    assert mio.eval("World clone() hasTrait(TGreetable)")
def test_delTrait(mio):
    mio.eval("""
        TGreetable = Trait clone()
        World = Object clone() do (
            use(TGreetable)
        )
    """)
    assert mio.eval("World hasTrait(TGreetable)")
    mio.eval("World delTrait(TGreetable)")
    assert mio.eval("World behaviors") == []
    assert not mio.eval("World hasTrait(TGreetable)")
def test_delTrait2(mio, capfd):
    mio.eval("""
        TGreetable = Trait clone() do (
            hello = method(
                print("Hello World!")
            )
        )
        World = Object clone() do (
            use(TGreetable)
        )
    """)
    assert mio.eval("World hasTrait(TGreetable)")
    assert mio.eval("World behaviors") == ["hello"]
    assert mio.eval("World hello()").value is None
    out, err = capfd.readouterr()
    assert out == "Hello World!\n"
    mio.eval("World delTrait(TGreetable)")
    assert mio.eval("World behaviors") == []
    assert not mio.eval("World hasTrait(TGreetable)")
    with raises(AttributeError):
        mio.eval("World hello()", reraise=True)
def test_delTrait3(mio, capfd):
    mio.eval("""
        TGreetable = Trait clone() do (
            hello = method(
                print("Hello World!")
            )
        )
        World = Object clone() do (
            use(TGreetable)
        )
    """)
    assert mio.eval("World hasTrait(TGreetable)")
    assert mio.eval("World behaviors") == ["hello"]
    assert mio.eval("World hello()").value is None
    out, err = capfd.readouterr()
    assert out == "Hello World!\n"
    mio.eval("World delTrait(TGreetable)")
    assert mio.eval("World behaviors") == []
    assert not mio.eval("World hasTrait(TGreetable)")
    with raises(TypeError):
        mio.eval("World delTrait(TGreetable)", reraise=True)
def test_traits(mio):
    mio.eval("""
        TGreetable = Trait clone()
        World = Object clone() do (
            use(TGreetable)
        )
    """)
    TGreetable = mio.eval("TGreetable")
    assert mio.eval("World traits") == [TGreetable]
def test_behaviors(mio, capfd):
    mio.eval("""
        TGreetable = Trait clone() do (
            hello = method(
                print("Hello World!")
            )
        )
        World = Object clone() do (
            use(TGreetable)
        )
    """)
    assert mio.eval("World hello()").value is None
    out, err = capfd.readouterr()
    assert out == "Hello World!\n"
    assert mio.eval("World behaviors") == ["hello"]
def test_del_behavior(mio, capfd):
    mio.eval("""
        TGreetable = Trait clone() do (
            hello = method(
                print("Hello World!")
            )
        )
        World = Object clone() do (
            use(TGreetable)
        )
    """)
    assert mio.eval("World hello()").value is None
    out, err = capfd.readouterr()
    assert out == "Hello World!\n"
    assert mio.eval("World behaviors") == ["hello"]
    mio.eval("World del(\"hello\")")
    with raises(AttributeError):
        mio.eval("World hello()", reraise=True)
    assert mio.eval("World behaviors") == []
 | |
| 
	#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
Copyright (c) 2011 Tyler Kennedy <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .topping import Topping
from jawa.constants import *
from jawa.util.descriptor import method_descriptor
from burger.util import WalkerCallback, walk_method, try_eval_lambda
import six.moves
class BlocksTopping(Topping):
    """Gets most available block types."""
    PROVIDES = [
        "identify.block.superclass",
        "blocks"
    ]
    DEPENDS = [
        "identify.block.register",
        "identify.block.list",
        "identify.identifier",
        "language",
        "version.data",
        "version.is_flattened"
    ]
    @staticmethod
    def act(aggregate, classloader, verbose=False):
        data_version = aggregate["version"]["data"] if "data" in aggregate["version"] else -1
        if data_version >= 1901: # 18w43a
            BlocksTopping._process_1point14(aggregate, classloader, verbose)
            return # This also adds classes
        elif data_version >= 1461: # 18w02a
            BlocksTopping._process_1point13(aggregate, classloader, verbose)
        else:
            BlocksTopping._process_1point12(aggregate, classloader, verbose)
        # Shared logic: Go through the block list and add the field info.
        list = aggregate["classes"]["block.list"]
        lcf = classloader[list]
        blocks = aggregate["blocks"]
        block = blocks["block"]
        block_fields = blocks.setdefault("block_fields", {})
        # Find the static block, and load the fields for each.
        method = lcf.methods.find_one(name="<clinit>")
        blk_name = ""
        for ins in method.code.disassemble():
            if ins in ("ldc", "ldc_w"):
                const = ins.operands[0]
                if isinstance(const, String):
                    blk_name = const.string.value
            elif ins == "putstatic":
                if blk_name is None or blk_name == "Accessed Blocks before Bootstrap!":
                    continue
                const = ins.operands[0]
                field = const.name_and_type.name.value
                if blk_name in block:
                    block[blk_name]["field"] = field
                elif verbose:
                    print("Cannot find a block matching %s for field %s" % (blk_name, field))
                block_fields[field] = blk_name
    @staticmethod
    def _process_1point14(aggregate, classloader, verbose):
        # Handles versions after 1.14 (specifically >= 18w43a)
        # All of the registration happens in the list class in this version.
        listclass = aggregate["classes"]["block.list"]
        lcf = classloader[listclass]
        superclass = next(lcf.fields.find()).type.name # The first field in the list class is a block
        cf = classloader[superclass]
        aggregate["classes"]["block.superclass"] = superclass
        if "block" in aggregate["language"]:
            language = aggregate["language"]["block"]
        else:
            language = None
        # Figure out what the builder class is
        ctor = cf.methods.find_one(name="<init>")
        builder_class = ctor.args[0].name
        builder_cf = classloader[builder_class]
        # Sets hardness and resistance
        hardness_setter = builder_cf.methods.find_one(args='FF')
        # There's also one that sets both to the same value
        hardness_setter_2 = None
        for method in builder_cf.methods.find(args='F'):
            for ins in method.code.disassemble():
                if ins.mnemonic == "invokevirtual":
                    const = ins.operands[0]
                    if (const.name_and_type.name.value == hardness_setter.name.value and
                            const.name_and_type.descriptor.value == hardness_setter.descriptor.value):
                        hardness_setter_2 = method
                        break
        assert hardness_setter_2 != None
        # ... and one that sets them both to 0
        hardness_setter_3 = None
        for method in builder_cf.methods.find(args=''):
            for ins in method.code.disassemble():
                if ins.mnemonic == "invokevirtual":
                    const = ins.operands[0]
                    if (const.name_and_type.name.value == hardness_setter_2.name.value and
                            const.name_and_type.descriptor.value == hardness_setter_2.descriptor.value):
                        hardness_setter_3 = method
                        break
        assert hardness_setter_3 != None
        light_setter = builder_cf.methods.find_one(args='I')
        if light_setter == None:
            # 20w12a replaced the simple setter with one that takes a lambda
            # that is called to compute the light level for a given block
            # state.  Most blocks simply return a constant value, but some
            # such as sea pickles have varying light levels by state.
            light_setter = builder_cf.methods.find_one(args='Ljava/util/function/ToIntFunction;')
        assert light_setter != None
        blocks = aggregate.setdefault("blocks", {})
        block = blocks.setdefault("block", {})
        ordered_blocks = blocks.setdefault("ordered_blocks", [])
        block_fields = blocks.setdefault("block_fields", {})
        # Find the static block registration method
        method = lcf.methods.find_one(name='<clinit>')
        class Walker(WalkerCallback):
            def __init__(self):
                self.cur_id = 0
            def on_new(self, ins, const):
                class_name = const.name.value
                return {"class": class_name}
            def on_invoke(self, ins, const, obj, args):
                method_name = const.name_and_type.name.value
                method_desc = const.name_and_type.descriptor.value
                desc = method_descriptor(method_desc)
                if ins.mnemonic == "invokestatic":
                    if const.class_.name.value == listclass:
                        if len(desc.args) == 2 and desc.args[0].name == "java/lang/String" and desc.args[1].name == superclass:
                            # Call to the static register method.
                            text_id = args[0]
                            current_block = args[1]
                            current_block["text_id"] = text_id
                            current_block["numeric_id"] = self.cur_id
                            self.cur_id += 1
                            lang_key = "minecraft.%s" % text_id
                            if language != None and lang_key in language:
                                current_block["display_name"] = language[lang_key]
                            block[text_id] = current_block
                            ordered_blocks.append(text_id)
                            return current_block
                        elif len(desc.args) == 1 and desc.args[0].name == "int" and desc.returns.name == "java/util/function/ToIntFunction":
                            # 20w12a+: a method that takes a light level and returns a function
                            # that checks if the current block state has the lit state set,
                            # using light level 0 if not and the given light level if so.
                            # For our purposes, just simplify it to always be the given light level.
                            return args[0]
                        else:
                            # In 20w12a+ (1.16), some blocks (e.g. logs) use a separate method
                            # for initialization.  Call them.
                            sub_method = lcf.methods.find_one(name=method_name, args=desc.args_descriptor, returns=desc.returns_descriptor)
                            return walk_method(lcf, sub_method, self, verbose, args)
                    elif const.class_.name.value == builder_class:
                        if desc.args[0].name == superclass: # Copy constructor
                            copy = dict(args[0])
                            del copy["text_id"]
                            del copy["numeric_id"]
                            del copy["class"]
                            if "display_name" in copy:
                                del copy["display_name"]
                            return copy
                        else:
                            return {} # Append current block
                else:
                    if method_name == "hasNext":
                        # We've reached the end of block registration
                        # (and have started iterating over registry keys)
                        raise StopIteration()
                    if method_name == hardness_setter.name.value and method_desc == hardness_setter.descriptor.value:
                        obj["hardness"] = args[0]
                        obj["resistance"] = args[1]
                    elif method_name == hardness_setter_2.name.value and method_desc == hardness_setter_2.descriptor.value:
                        obj["hardness"] = args[0]
                        obj["resistance"] = args[0]
                    elif method_name == hardness_setter_3.name.value and method_desc == hardness_setter_3.descriptor.value:
                        obj["hardness"] = 0.0
                        obj["resistance"] = 0.0
                    elif method_name == light_setter.name.value and method_desc == light_setter.descriptor.value:
                        if args[0] != None:
                            obj["light"] = args[0]
                    elif method_name == "<init>":
                        # Call to the constructor for the block
                        # The majority of blocks have a 1-arg constructor simply taking the builder.
                        # However, sand has public BlockSand(int color, Block.Builder builder), and
                        # signs (as of 1.15-pre1) have public BlockSign(Block.Builder builder, WoodType type)
                        # (prior to 1.15-pre1, we could assume that the last argument was the builder).
                        # There are also cases of arg-less constructors, which we just ignore as they are presumably not blocks.
                        for idx, arg in enumerate(desc.args):
                            if arg.name == builder_class:
                                obj.update(args[idx])
                                break
                    if desc.returns.name == builder_class or desc.returns.name == superclass:
                        return obj
                    elif desc.returns.name == aggregate["classes"]["identifier"]:
                        # Probably getting the air identifier from the registry
                        return "air"
                    elif desc.returns.name != "void":
                        return object()
            def on_get_field(self, ins, const, obj):
                if const.class_.name.value == superclass:
                    # Probably getting the static AIR resource location
                    return "air"
                elif const.class_.name.value == listclass:
                    return block[block_fields[const.name_and_type.name.value]]
                elif const.name_and_type.descriptor == "Ljava/util/function/ToIntFunction;":
                    # Light level lambda, used by candles.  Not something we
                    # can evaluate (it depends on the block state).
                    return None
                else:
                    return object()
            def on_put_field(self, ins, const, obj, value):
                if isinstance(value, dict):
                    field = const.name_and_type.name.value
                    value["field"] = field
                    block_fields[field] = value["text_id"]
            def on_invokedynamic(self, ins, const, args):
                # 1.15-pre2 introduced a Supplier<BlockEntityType> parameter,
                # and while most blocks handled it in their own constructor,
                # chests put it directly in initialization.  We don't care about
                # the value (we get block entities in a different way), but
                # we still need to override this as the default implementation
                # raises an exception
                # 20w12a changed light levels to use a lambda, and we do
                # care about those.  The light level is a ToIntFunction<BlockState>.
                method_desc = const.name_and_type.descriptor.value
                desc = method_descriptor(method_desc)
                if desc.returns.name == "java/util/function/ToIntFunction":
                    # Try to invoke the function.
                    try:
                        args.append(object()) # The state that the lambda gets
                        return try_eval_lambda(ins, args, lcf)
                    except Exception as ex:
                        if verbose:
                            print("Failed to call lambda for light data:", ex)
                        return None
                else:
                    return object()
        walk_method(lcf, method, Walker(), verbose)
    @staticmethod
    def _process_1point13(aggregate, classloader, verbose):
        # Handles versions after 1.13 (specifically >= 18w02a)
        superclass = aggregate["classes"]["block.register"]
        cf = classloader[superclass]
        aggregate["classes"]["block.superclass"] = superclass
        if "block" in aggregate["language"]:
            language = aggregate["language"]["block"]
        else:
            language = None
        # Figure out what the builder class is
        ctor = cf.methods.find_one(name="<init>")
        builder_class = ctor.args[0].name
        builder_cf = classloader[builder_class]
        # Sets hardness and resistance
        hardness_setter = builder_cf.methods.find_one(args='FF')
        # There's also one that sets both to the same value
        hardness_setter_2 = None
        for method in builder_cf.methods.find(args='F'):
            for ins in method.code.disassemble():
                if ins == "invokevirtual":
                    const = ins.operands[0]
                    if (const.name_and_type.name.value == hardness_setter.name.value and
                            const.name_and_type.descriptor.value == hardness_setter.descriptor.value):
                        hardness_setter_2 = method
                        break
        assert hardness_setter_2 != None
        # ... and one that sets them both to 0
        hardness_setter_3 = None
        for method in builder_cf.methods.find(args=''):
            for ins in method.code.disassemble():
                if ins == "invokevirtual":
                    const = ins.operands[0]
                    if (const.name_and_type.name.value == hardness_setter_2.name.value and
                            const.name_and_type.descriptor.value == hardness_setter_2.descriptor.value):
                        hardness_setter_3 = method
                        break
        assert hardness_setter_3 != None
        light_setter = builder_cf.methods.find_one(args='I')
        blocks = aggregate.setdefault("blocks", {})
        block = blocks.setdefault("block", {})
        ordered_blocks = blocks.setdefault("ordered_blocks", [])
        # Find the static block registration method
        method = cf.methods.find_one(args='', returns="V", f=lambda m: m.access_flags.acc_public and m.access_flags.acc_static)
        class Walker(WalkerCallback):
            def __init__(self):
                self.cur_id = 0
            def on_new(self, ins, const):
                class_name = const.name.value
                return {"class": class_name}
            def on_invoke(self, ins, const, obj, args):
                method_name = const.name_and_type.name.value
                method_desc = const.name_and_type.descriptor.value
                desc = method_descriptor(method_desc)
                if ins == "invokestatic":
                    if const.class_.name == superclass:
                        # Call to the static register method.
                        text_id = args[0]
                        current_block = args[1]
                        current_block["text_id"] = text_id
                        current_block["numeric_id"] = self.cur_id
                        self.cur_id += 1
                        lang_key = "minecraft.%s" % text_id
                        if language != None and lang_key in language:
                            current_block["display_name"] = language[lang_key]
                        block[text_id] = current_block
                        ordered_blocks.append(text_id)
                    elif const.class_.name == builder_class:
                        if desc.args[0].name == superclass: # Copy constructor
                            copy = dict(args[0])
                            del copy["text_id"]
                            del copy["numeric_id"]
                            del copy["class"]
                            if "display_name" in copy:
                                del copy["display_name"]
                            return copy
                        else:
                            return {} # Append current block
                else:
                    if method_name == "hasNext":
                        # We've reached the end of block registration
                        # (and have started iterating over registry keys)
                        raise StopIteration()
                    if method_name == hardness_setter.name and method_desc == hardness_setter.descriptor:
                        obj["hardness"] = args[0]
                        obj["resistance"] = args[1]
                    elif method_name == hardness_setter_2.name and method_desc == hardness_setter_2.descriptor:
                        obj["hardness"] = args[0]
                        obj["resistance"] = args[0]
                    elif method_name == hardness_setter_3.name and method_desc == hardness_setter_3.descriptor:
                        obj["hardness"] = 0.0
                        obj["resistance"] = 0.0
                    elif method_name == light_setter.name and method_desc == light_setter.descriptor:
                        obj["light"] = args[0]
                    elif method_name == "<init>":
                        # Call to the constructor for the block
                        # We can't hardcode index 0 because sand has an extra parameter, so use the last one
                        # There are also cases where it's an arg-less constructor; we don't want to do anything there.
                        if len(args) > 0:
                            obj.update(args[-1])
                    if desc.returns.name == builder_class:
                        return obj
                    elif desc.returns.name == aggregate["classes"]["identifier"]:
                        # Probably getting the air identifier from the registry
                        return "air"
                    elif desc.returns.name != "void":
                        return object()
            def on_get_field(self, ins, const, obj):
                if const.class_.name == superclass:
                    # Probably getting the static AIR resource location
                    return "air"
                else:
                    return object()
            def on_put_field(self, ins, const, obj, value):
                raise Exception("unexpected putfield: %s" % ins)
        walk_method(cf, method, Walker(), verbose)
    @staticmethod
    def _process_1point12(aggregate, classloader, verbose):
        # Handles versions prior to 1.13
        superclass = aggregate["classes"]["block.register"]
        cf = classloader[superclass]
        aggregate["classes"]["block.superclass"] = superclass
        is_flattened = aggregate["version"]["is_flattened"]
        individual_textures = True #aggregate["version"]["protocol"] >= 52 # assume >1.5 http://wiki.vg/Protocol_History#1.5.x since don't read packets TODO
        if "tile" in aggregate["language"]:
            language = aggregate["language"]["tile"]
        elif "block" in aggregate["language"]:
            language = aggregate["language"]["block"]
        else:
            language = None
        # Find the static block registration method
        method = cf.methods.find_one(args='', returns="V", f=lambda m: m.access_flags.acc_public and m.access_flags.acc_static)
        blocks = aggregate.setdefault("blocks", {})
        block = blocks.setdefault("block", {})
        ordered_blocks = blocks.setdefault("ordered_blocks", [])
        tmp = []
        stack = []
        locals = {}
        for ins in method.code.disassemble():
            if ins == "new":
                # The beginning of a new block definition
                const = ins.operands[0]
                class_name = const.name.value
                current_block = {
                    "class": class_name,
                    "calls": {}
                }
                stack.append(current_block)
            elif ins.mnemonic.startswith("fconst"):
                stack.append(float(ins.mnemonic[-1]))
            elif ins == "aconst_null":
                stack.append(None)
            elif ins in ("bipush", "sipush"):
                stack.append(ins.operands[0].value)
            elif ins == "fdiv":
                den = stack.pop()
                num = stack.pop()
                if isinstance(den, (float, int)) and isinstance(num, dict) and "scale" in num:
                    num["scale"] /= den
                    stack.append(num)
                else:
                    stack.append({"numerator": num, "denominator": den})
            elif ins in ("ldc", "ldc_w"):
                const = ins.operands[0]
                if isinstance(const, ConstantClass):
                    stack.append("%s.class" % const.name.value)
                elif isinstance(const, String):
                    stack.append(const.string.value)
                else:
                    stack.append(const.value)
            elif ins == "getstatic":
                const = ins.operands[0]
                if const.class_.name == superclass:
                    # Probably getting the static AIR resource location
                    stack.append("air")
                else:
                    stack.append({"obj": None, "field": repr(const)})
            elif ins == "getfield":
                const = ins.operands[0]
                obj = stack.pop()
                if "text_id" in obj:
                    stack.append({
                        "block": obj["text_id"],
                        "field": const.name_and_type.name.value,
                        "scale": 1
                    })
                else:
                    stack.append({"obj": obj, "field": repr(const)})
            elif ins in ("invokevirtual", "invokespecial", "invokeinterface"):
                # A method invocation
                const = ins.operands[0]
                method_name = const.name_and_type.name.value
                method_desc = const.name_and_type.descriptor.value
                desc = method_descriptor(method_desc)
                num_args = len(desc.args)
                if method_name == "hasNext":
                    # We've reached the end of block registration
                    # (and have started iterating over registry keys)
                    break
                args = []
                for i in six.moves.range(num_args):
                    args.insert(0, stack.pop())
                obj = stack.pop()
                if "calls" in obj:
                    obj["calls"][method_name + method_desc] = args
                if desc.returns.name != "void":
                    if desc.returns.name == superclass:
                        stack.append(obj)
                    else:
                        stack.append({"obj": obj, "method": const, "args": args})
            elif ins == "invokestatic":
                # Call to the registration method
                const = ins.operands[0]
                desc = method_descriptor(const.name_and_type.descriptor.value)
                num_args = len(desc.args)
                if num_args == 3:
                    current_block = stack.pop()
                    current_block["text_id"] = stack.pop()
                    current_block["numeric_id"] = stack.pop()
                else:
                    assert num_args == 2
                    current_block = stack.pop()
                    current_block["text_id"] = stack.pop()
                tmp.append(current_block)
            elif ins == "astore":
                locals[ins.operands[0].value] = stack.pop()
            elif ins == "aload":
                stack.append(locals[ins.operands[0].value])
            elif ins == "dup":
                stack.append(stack[-1])
            elif ins == "checkcast":
                pass
            elif verbose:
                print("Unknown instruction %s: stack is %s" % (ins, stack))
        # Now that we have all of the blocks, we need a few more things
        # to make sense of what it all means. So,
        #   1. Find the function that returns 'this' and accepts a string.
        #      This is the name or texture setting function.
        #   2. Find the function that returns 'this' and accepts a float.
        #      This is the function that sets the hardness.
        string_setter = cf.methods.find_one(returns="L" + superclass + ";",
                args="Ljava/lang/String;",
                f=lambda x: not x.access_flags.acc_static)
        if string_setter:
            name_setter = string_setter.name.value + cf.constants.get(string_setter.descriptor.index).value
        else:
            name_setter = None
        float_setters = list(cf.methods.find(
            returns="L" + superclass + ";",
            args="F",
            f=lambda x: x.access_flags.acc_protected
        ))
        for method in float_setters:
            fld = None
            for ins in method.code.disassemble():
                if ins == "putfield":
                    const = ins.operands[0]
                    fld = const.name_and_type.name.value
                elif ins == "ifge":
                    hardness_setter = method.name.value + method.descriptor.value
                    hardness_field = fld
                    break
        for method in float_setters:
            # Look for the resistance setter, which multiplies by 3.
            is_resistance = False
            for ins in method.code.disassemble():
                if ins in ("ldc", "ldc_w"):
                    is_resistance = (ins.operands[0].value == 3.0)
                elif ins == "fmul" and is_resistance:
                    resistance_setter = method.name.value + method.descriptor.value
                elif ins == "putfield" and is_resistance:
                    const = ins.operands[0]
                    resistance_field = const.name_and_type.name.value
                    break
                else:
                    is_resistance = False
        for method in float_setters:
            # Look for the light setter, which multiplies by 15, but 15 is the first value (15 * val)
            is_light = False
            for ins in method.code.disassemble():
                if ins in ("ldc", "ldc_w"):
                    is_light = (ins.operands[0].value == 15.0)
                elif ins.mnemonic.startswith("fload"):
                    pass
                elif ins == "fmul" and is_light:
                    light_setter = method.name.value + method.descriptor.value
                    break
                else:
                    is_light = False
        if is_flattened:
            # Current IDs are incremental, manually track them
            cur_id = 0
        for blk in tmp:
            if not "text_id" in blk:
                if verbose:
                    print("Dropping nameless block:", blk)
                continue
            final = {}
            if "numeric_id" in blk:
                assert not is_flattened
                final["numeric_id"] = blk["numeric_id"]
            else:
                assert is_flattened
                final["numeric_id"] = cur_id
                cur_id += 1
            if "text_id" in blk:
                final["text_id"] = blk["text_id"]
            final["class"] = blk["class"]
            if name_setter in blk["calls"]:
                final["name"] = blk["calls"][name_setter][0]
            if "name" in final:
                lang_key = "%s.name" % final["name"]
            else:
                # 17w43a (1.13) and above - no specific translation string, only the id
                lang_key = "minecraft.%s" % final["text_id"]
            if language and lang_key in language:
                final["display_name"] = language[lang_key]
            if hardness_setter not in blk["calls"]:
                final["hardness"] = 0.0
                final["resistance"] = 0.0
            else:
                stack = blk["calls"][hardness_setter]
                if len(stack) == 0:
                    if verbose:
                        print("%s: Broken hardness value" % final["text_id"])
                    final["hardness"] = 0.0
                    final["resistance"] = 0.0
                else:
                    hardness = blk["calls"][hardness_setter][0]
                    if isinstance(hardness, dict) and "field" in hardness:
                        # Repair field info
                        assert hardness["field"] == hardness_field
                        assert "block" in hardness
                        assert hardness["block"] in block
                        hardness = block[hardness["block"]]["hardness"] * hardness["scale"]
                    final["hardness"] = hardness
                    # NOTE: vanilla multiplies this value by 5, but then divides by 5 later
                    # Just ignore that multiplication to avoid confusion.
                    final["resistance"] = hardness
            if resistance_setter in blk["calls"]:
                resistance = blk["calls"][resistance_setter][0]
                if isinstance(resistance, dict) and "field" in resistance:
                    # Repair field info
                    assert resistance["field"] == resistance_field
                    assert "block" in resistance
                    assert resistance["block"] in block
                    resistance = block[resistance["block"]]["resistance"] * resistance["scale"]
                # The * 3 is also present in vanilla, strange logic
                # Division to normalize for the multiplication/division by 5.
                final["resistance"] = resistance * 3.0 / 5.0
            # Already set in the hardness area, so no need for an else clause
            if light_setter in blk["calls"]:
                final["light"] = int(blk["calls"][light_setter][0] * 15)
            ordered_blocks.append(final["text_id"])
            block[final["text_id"]] = final
 | |
| 
	#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from pyvows import Vows, expect
from vows.transformer_test_data import TESTITEMS, FIT_IN_CROP_DATA, TestData, MockSyncDetector, MockErrorSyncDetector
from thumbor.transformer import Transformer
class EngineContext(Vows.Context):
    def _prepare_engine(self, topic, callback):
        context = topic[0].to_context()
        self.engine = context.modules.engine
        self.test_data = topic
        trans = Transformer(context)
        trans.transform(callback)
@Vows.assertion
def to_be_resized(topic):
    expect(topic.has_resized_properly()).to_be_true()
@Vows.assertion
def to_be_cropped(topic):
    expect(topic.has_cropped_properly()).to_be_true()
@Vows.batch
class TransformerVows(Vows.Context):
    class InvalidCrop(Vows.Context):
        @Vows.async_topic
        def topic(self, callback):
            data = TestData(
                source_width=800, source_height=600,
                target_width=800, target_height=600,
                halign="right", valign="top",
                focal_points=[],
                crop_left=200, crop_top=0, crop_right=100, crop_bottom=100
            )
            ctx = data.to_context()
            self.engine = ctx.modules.engine
            trans = Transformer(ctx)
            trans.transform(callback)
        def should_not_crop(self, topic):
            expect(self.engine.calls['crop']).to_be_empty()
    class MetaWithOrientation(Vows.Context):
        @Vows.async_topic
        def topic(self, callback):
            data = TestData(
                source_width=800, source_height=600,
                target_width=100, target_height=100,
                halign="right", valign="top",
                focal_points=[],
                crop_left=None, crop_top=None, crop_right=None, crop_bottom=None,
                meta=True
            )
            ctx = data.to_context()
            ctx.config.RESPECT_ORIENTATION = True
            self.engine = ctx.modules.engine
            trans = Transformer(ctx)
            trans.transform(callback)
        def should_work_well(self, topic):
            expect(topic).to_be_true()
    class Flip(Vows.Context):
        @Vows.async_topic
        def topic(self, callback):
            data = TestData(
                source_width=800, source_height=600,
                target_width=-800, target_height=-600,
                halign="right", valign="top",
                focal_points=[],
                crop_left=None, crop_top=None, crop_right=None, crop_bottom=None
            )
            ctx = data.to_context()
            self.engine = ctx.modules.engine
            trans = Transformer(ctx)
            trans.transform(callback)
        def should_do_horizontal_flip(self, topic):
            expect(self.engine.calls['horizontal_flip']).to_equal(1)
        def should_do_vertical_flip(self, topic):
            expect(self.engine.calls['vertical_flip']).to_equal(1)
    class ExtractCover(Vows.Context):
        @Vows.async_topic
        def topic(self, callback):
            data = TestData(
                source_width=800, source_height=600,
                target_width=-800, target_height=-600,
                halign="right", valign="top",
                focal_points=[],
                crop_left=None, crop_top=None, crop_right=None, crop_bottom=None
            )
            ctx = data.to_context()
            ctx.request.filters = 'cover()'
            ctx.request.image = 'some.gif'
            ctx.request.extension = 'GIF'
            ctx.request.engine.extension = '.gif'
            ctx.config.USE_GIFSICLE_ENGINE = True
            self.engine = ctx.modules.engine
            trans = Transformer(ctx)
            trans.transform(callback)
        def should_do_extract_cover(self, topic):
            expect(self.engine.calls['cover']).to_equal(1)
    class ResizeCrop(Vows.Context):
        def topic(self):
            for item in TESTITEMS:
                yield item
        class AsyncResizeCrop(Vows.Context):
            @Vows.async_topic
            def topic(self, callback, topic):
                self.test_data = topic
                context = topic.to_context()
                trans = Transformer(context)
                trans.transform(callback)
            def should_resize_properly(self, topic):
                expect(self.test_data).to_be_resized()
            def should_crop_properly(self, topic):
                expect(self.test_data).to_be_cropped()
    class ResizeCropWithDetectors(Vows.Context):
        def topic(self):
            for item in TESTITEMS:
                yield item
        class AsyncResizeCrop(Vows.Context):
            @Vows.async_topic
            def topic(self, callback, topic):
                self.test_data = topic
                context = topic.to_context(detectors=[MockSyncDetector])
                trans = Transformer(context)
                trans.transform(callback)
            def should_resize_properly(self, topic):
                expect(self.test_data).to_be_resized()
            def should_crop_properly(self, topic):
                expect(self.test_data).to_be_cropped()
    class ResizeCropWithDetectorErrorsIgnored(Vows.Context):
        @Vows.async_topic
        def topic(self, callback):
            self.test_data = TestData(
                source_width=800, source_height=600,
                target_width=400, target_height=150,
                halign="center", valign="middle",
                focal_points=[],
                crop_left=0, crop_top=75, crop_right=800, crop_bottom=375
            )
            context = self.test_data.to_context(detectors=[MockErrorSyncDetector], ignore_detector_error=True)
            trans = Transformer(context)
            trans.transform(callback)
        def should_resize_properly(self, topic):
            expect(self.test_data).to_be_resized()
        def should_crop_properly(self, topic):
            expect(self.test_data).to_be_cropped()
    class ResizeCropWithoutDetectorErrorsIgnored(Vows.Context):
        @Vows.async_topic
        def topic(self, callback):
            self.test_data = TestData(
                source_width=800, source_height=600,
                target_width=400, target_height=150,
                halign="center", valign="middle",
                focal_points=[],
                crop_left=0, crop_top=75, crop_right=800, crop_bottom=375
            )
            context = self.test_data.to_context(detectors=[MockErrorSyncDetector], ignore_detector_error=False)
            trans = Transformer(context)
            trans.transform(callback)
        def should_resize_properly(self, topic):
            expect(self.test_data.engine.calls['resize']).to_length(0)
    class FitIn(Vows.Context):
        def topic(self):
            for item in FIT_IN_CROP_DATA:
                yield item
        class AsyncFitIn(EngineContext):
            @Vows.async_topic
            def topic(self, callback, topic):
                self._prepare_engine(topic, callback)
            def should_not_have_crop(self, topic):
                expect(self.engine.calls['crop']).to_be_empty()
            def should_have_resize(self, topic):
                if not self.test_data[1][2]:
                    expect(self.engine.calls['resize']).to_be_empty()
                    return
                expect(self.engine.calls['resize']).not_to_be_empty()
            def should_have_proper_resize_calls(self, topic):
                length = self.test_data[1][2]
                expect(self.engine.calls['resize']).to_length(length)
            def should_have_proper_width(self, topic):
                if not self.test_data[1][2]:
                    return
                expect(self.engine.calls['resize'][0]['width']).to_equal(self.test_data[1][0])
            def should_have_proper_height(self, topic):
                if not self.test_data[1][2]:
                    return
                expect(self.engine.calls['resize'][0]['height']).to_equal(self.test_data[1][1])
 | |
| 
	"""
This module implements a class derived from dicts for working with timeseries.
"""
from copy import deepcopy
import json
from .tsslist import TssList
from .timeseries import Timeseries
class TssDict(dict):
    """
    This class is a way of handling some of the routine tasks for groups
    of timeseries.
    Assumption:
        This is a dict of timeseries that are keyed by tickers, etc. Or, it
        could be a dict of keys that hold lists of timeseries with some
        commonality.
    Usage:
        tssdict = TssDict(values=None)
    values can be a dict or a list; for a list, the key from each timeseries
    is used as the dict key.
    """
    timeseries_class = Timeseries
    def __init__(self, values=None):
        dict.__init__(self)  # only did this to satisfy pylint
        if isinstance(values, dict):
            for key, value in values.items():
                self[key] = value
        elif isinstance(values, list):
            for ts_tmp in values:
                self[ts_tmp.key] = ts_tmp
        else:
            # nothing to do.
            pass
    def min_date(self):
        """
        Returns the earliest date as a tuple(datetime, key in the group).
        """
        min_date = None
        min_key = None
        for key, values in self.items():
            if isinstance(values, Timeseries):
                date = values.start_date("datetime")
                if min_date is not None:
                    if date < min_date:
                        min_date = date
                        min_key = key
                else:
                    min_date = date
                    min_key = key
            else:
                # what is it?
                raise ValueError("Unsupported values in dict")
        return (min_date, min_key)
    def max_date(self):
        """
        Returns the latest date as a tuple(datetime, key in the group).
        If more than one has the same max date, simply one of them is
        returned.
        """
        max_date = None
        max_key = None
        for key, values in self.items():
            if isinstance(values, Timeseries):
                date = values.end_date("datetime")
                if max_date is not None:
                    if date > max_date:
                        max_date = date
                        max_key = key
                else:
                    max_date = date
                    max_key = key
            else:
                # what is it?
                raise ValueError("Unsupported values in dict")
        return (max_date, max_key)
    def longest_ts(self):
        """
        This function returns a tuple (length, key) for the item with the
        longest timeseries.
        """
        max_length = 0
        max_key = None
        for key, ts in self.items():
            if isinstance(ts, Timeseries):
                if ts.tseries is not None:
                    length = ts.tseries.shape[0]
                    if length > max_length:
                        max_length = length
                        max_key = key
            else:
                # what is it?
                raise ValueError("Unsupported values in dict")
        return (max_length, max_key)
    def shortest_ts(self):
        """
        This function returns a tuple (length, key) for the item with the
        shortest timeseries (None if any timeseries has no data).
        """
        min_length = None
        min_key = None
        for key, ts in self.items():
            if isinstance(ts, Timeseries):
                if ts.tseries is None:
                    return None
                length = ts.tseries.shape[0]
                if min_length is None or length < min_length:
                    min_length = length
                    min_key = key
            else:
                # what is it?
                raise ValueError("Unsupported values in dict")
        return (min_length, min_key)
    def get_values(self, date, keys=None, notify=False):
        """
        This function finds the values as of the date. If keys come in as a
        list, the order of the returned values can be controlled, or the
        timeseries selected can be limited.
        If notify is True, a missing value raises a ValueError; otherwise None
        is appended for that key.
        """
        if keys is None:
            keys = self.keys()
        all_values = []
        for key in keys:
            tmp = self[key]
            if isinstance(tmp, Timeseries):
                try:
                    all_values.append(tmp.tseries[tmp.row_no(date)])
                except ValueError:
                    if notify:
                        raise ValueError(
                            "ts %s does not have a value on %s" % (key, date)
                        )
                    else:
                        all_values.append(None)
            else:
                raise ValueError("Unsupported values in dict")
        return (tuple(all_values), tuple(keys))
    def combine(self, keys=None, discard=True, pad=None):
        """
        This function combines all timeseries into one. Passing keys controls
        which timeseries are included and the order of the columns.
        Usage:
            combine(self, keys=None, discard=True, pad=None)
        returns ts, keys
        """
        def iter_combine(ts1, item, discard=discard, pad=pad):
            """This function combines an item with an existing timeseries. """
            if isinstance(item, TssList):
                if ts1 is None:
                    ts1 = item.combine(discard=discard, pad=pad)
                else:
                    ts1.combine(item, discard=discard, pad=pad)
            elif isinstance(item, list):
                if ts1 is None:
                    ts1 = TssList(item).combine(discard=discard, pad=pad)
                else:
                    ts1.combine(item, discard=discard, pad=pad)
            elif isinstance(item, Timeseries):
                if ts1 is None:
                    ts1 = item.clone()
                else:
                    ts1 = ts1.combine(item, discard=discard, pad=pad)
            elif isinstance(item, TssDict):
                if ts1 is None:
                    ts1, _ = item.combine(discard=discard, pad=pad)
                else:
                    ts1.combine(
                        item.combine(discard=discard, pad=pad),
                        discard=discard,
                        pad=pad,
                    )
            else:
                raise ValueError("Unsupported type in for \n%s" % (item))
            return ts1
        if keys is None:
            keys = self.keys()
        if len(keys) == 0:
            return None
        ts1 = None
        for key in keys:
            ts1 = iter_combine(ts1, self[key], discard=discard, pad=pad)
        return ts1, tuple(keys)
    def clone(self):
        """
        Returns a new copy of the object.
        """
        return deepcopy(self)
    def to_dict(self, dt_fmt="str", data_list=True):
        """
        This function outputs the entirety of the object as a dict with
        the timeseries components as a dict as well.
        This enables building JSON formatted files from objects that include
        TssDict objects.
        Usage:
            self.to_dict(dt_fmt='str', data_list=True)
        """
        outdict = {}
        for key, ts in self.items():
            outdict[key] = ts.to_dict(dt_fmt=dt_fmt, data_list=data_list)
        return outdict
    def from_dict(self, tssdict):
        """
        This function loads from a dict.
        The format of the dict of timeseries is assumed to use the form from
        Timeseries.to_dict(dt_fmt='str')
        """
        self.clear()
        for key, value in tssdict.items():
            self[key] = self.timeseries_class().from_dict(value)
        return self
    def to_json(self, indent=2, dt_fmt="str", data_list=True):
        """
        This function returns the timeseries dict in JSON format.
        Usage:
            self.to_json(indent=2, dt_fmt='str', data_list=True)
        indent: indenting in the JSON output
        dt_fmt: formatting of the dates. Look at help for
                    Timeseries.to_dict
        data_list: Whether data uses a dict for dates as keys or
                   simply a list.
                   Default is for a list. Otherwise, sorting the
                   timeseries in the list would be required.
        """
        return json.dumps(
            self.to_dict(dt_fmt=dt_fmt, data_list=data_list), indent=indent
        )
    def from_json(self, json_str):
        """
        This function loads a JSON string and applies it to the object.
        """
        self.clear()
        tss_tmp = json.loads(json_str)
        if isinstance(tss_tmp, dict):
            for key, value in tss_tmp.items():
                self[key] = self.timeseries_class().from_dict(value)
        else:
            raise ValueError(
                "Incoming JSON string does not start with a dict."
            )
        return self
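# --- usage sketch (not part of the original module) --------------------------
# A minimal, hedged illustration of the API above. Only the empty JSON round
# trip is actually executed, because constructing a populated Timeseries is
# outside this module; the commented lines show the intended calls with
# hypothetical, already-built Timeseries objects ts_a and ts_b.
def _usage_sketch():
    tssdict = TssDict()
    # tssdict = TssDict(values=[ts_a, ts_b])       # keys taken from ts.key
    # earliest_date, earliest_key = tssdict.min_date()
    # combined_ts, keys = tssdict.combine(discard=True, pad=None)
    json_str = tssdict.to_json(indent=2)           # "{}" for an empty dict
    restored = TssDict().from_json(json_str)
    assert restored == tssdict
    return restored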
 | |
| 
	from __future__ import division, print_function, absolute_import
import numpy as np
import nose
from nose.tools import assert_true, assert_false, assert_equal, assert_almost_equal
from numpy.testing import assert_array_equal, assert_array_almost_equal
from dipy.tracking import metrics as tm
from dipy.tracking import distances as pf
def test_LSCv2():
    xyz1=np.array([[1,0,0],[2,0,0],[3,0,0]],dtype='float32')
    xyz2=np.array([[1,0,0],[1,2,0],[1,3,0]],dtype='float32')
    xyz3=np.array([[1.1,0,0],[1,2,0],[1,3,0]],dtype='float32')
    xyz4=np.array([[1,0,0],[2.1,0,0],[3,0,0]],dtype='float32')
    
    xyz5=np.array([[100,0,0],[200,0,0],[300,0,0]],dtype='float32')
    xyz6=np.array([[0,20,0],[0,40,0],[300,50,0]],dtype='float32')
    
    T=[xyz1,xyz2,xyz3,xyz4,xyz5,xyz6]
    C=pf.local_skeleton_clustering(T,0.2)
    
    #print C
    #print len(C)
    
    C2=pf.local_skeleton_clustering_3pts(T,0.2)
    
    #print C2
    #print len(C2)
            
    #"""
    
    for i in range(40):
        xyz=np.random.rand(3,3).astype('f4')
        T.append(xyz)
            
    from time import time
    t1=time()
    C3=pf.local_skeleton_clustering(T,.5)
    t2=time()
    print(t2-t1)
    print(len(C3))
    
    t1=time()
    C4=pf.local_skeleton_clustering_3pts(T,.5)
    t2=time()
    print(t2-t1)
    print(len(C4))
    for c in C3:
        assert_equal(np.sum(C3[c]['hidden']-C4[c]['hidden']),0)
    
    T2=[]
    for i in range(10**4):
        xyz=np.random.rand(10,3).astype('f4')
        T2.append(xyz)
    t1=time()
    C5=pf.local_skeleton_clustering(T2,.5)
    t2=time()
    print(t2-t1)
    print(len(C5))
    
    from dipy.data import get_data
    from nibabel import trackvis as tv
    try:
        from dipy.viz import fvtk
    except ImportError as e:
        raise nose.plugins.skip.SkipTest(
            'Fails to import dipy.viz due to %s' % str(e))
    
    streams,hdr=tv.read(get_data('fornix'))
    T3=[tm.downsample(s[0],6) for s in streams]    
    
    print('lenT3',len(T3))
    
    C=pf.local_skeleton_clustering(T3,10.)
    
    print('lenC',len(C))
    
    """
    
    r=fvtk.ren()
    colors=np.zeros((len(C),3))
    for c in C:
        color=np.random.rand(3)
        for i in C[c]['indices']:
            fvtk.add(r,fvtk.line(T3[i],color))
        colors[c]=color
    fvtk.show(r)
    fvtk.clear(r)
    skeleton=[]
    
    def width(w):
        if w<1:
            return 1
        else:
            return w
    
    for c in C:
    
        bundle=[T3[i] for i in C[c]['indices']]
        si,s=pf.most_similar_track_mam(bundle,'avg')    
        skeleton.append(bundle[si])
        fvtk.label(r,text=str(len(bundle)),pos=(bundle[si][-1]),scale=(2,2,2))
        fvtk.add(r,fvtk.line(skeleton,colors,opacity=1,linewidth=width(len(bundle)/10.)))
    
    fvtk.show(r)
    
    """
    
def test_bundles_distances_mam():
    xyz1A = np.array([[0,0,0],[1,0,0],[2,0,0],[3,0,0]],dtype='float32')
    xyz2A = np.array([[0,1,1],[1,0,1],[2,3,-2]],dtype='float32')
    xyz1B = np.array([[-1,0,0],[2,0,0],[2,3,0],[3,0,0]],dtype='float32')
    tracksA = [xyz1A, xyz2A]
    tracksB = [xyz1B, xyz1A, xyz2A]
    for metric in ('avg', 'min', 'max'):       
        DM2 = pf.bundles_distances_mam(tracksA, tracksB, metric=metric)
        
def test_bundles_distances_mdf():
    xyz1A = np.array([[0,0,0],[1,0,0],[2,0,0]],dtype='float32')    
    xyz2A = np.array([[0,1,1],[1,0,1],[2,3,-2]],dtype='float32')
    xyz3A = np.array([[0,0,0],[1,0,0],[3,0,0]],dtype='float32')    
    xyz1B = np.array([[-1,0,0],[2,0,0],[2,3,0]],dtype='float32')
       
    tracksA = [xyz1A,xyz2A]
    tracksB = [xyz1B, xyz1A, xyz2A]           
    DM2 = pf.bundles_distances_mdf(tracksA, tracksB)
    
    tracksA = [xyz1A,xyz1A]
    tracksB = [xyz1A,xyz1A]
    
    DM2 = pf.bundles_distances_mdf(tracksA, tracksB)
    assert_array_almost_equal(DM2,np.zeros((2,2)))
    
    tracksA = [xyz1A,xyz3A]
    tracksB = [xyz2A]
    
    DM2 = pf.bundles_distances_mdf(tracksA, tracksB)
    print(DM2)
        
    #assert_array_almost_equal(DM2,np.zeros((2,2)))    
    DM=np.zeros(DM2.shape)
    for (a,ta) in enumerate(tracksA):
        for (b,tb) in enumerate(tracksB):
            md=np.sum(np.sqrt(np.sum((ta-tb)**2,axis=1)))/3.
            md2=np.sum(np.sqrt(np.sum((ta-tb[::-1])**2,axis=1)))/3.
            DM[a,b]=np.min((md,md2))
    print(DM)
    
    print('--------------')
    for t in tracksA:
        print(t)
    print('--------------')
    for t in tracksB:
        print(t)
        
    assert_array_almost_equal(DM,DM2,4)
    
def test_mam_distances():
    xyz1 = np.array([[0,0,0],[1,0,0],[2,0,0],[3,0,0]])
    xyz2 = np.array([[0,1,1],[1,0,1],[2,3,-2]])
    # dm=array([[ 2,  2, 17], [ 3,  1, 14], [6,  2, 13], [11,  5, 14]])
    # this is the distance matrix between points of xyz1
    # and points of xyz2
    xyz1=xyz1.astype('float32')
    xyz2=xyz2.astype('float32')
    zd2 = pf.mam_distances(xyz1,xyz2)
    assert_almost_equal( zd2[0], 1.76135602742)
def test_approx_ei_traj():
    
    segs=100
    t=np.linspace(0,1.75*2*np.pi,segs)    
    x =t 
    y=5*np.sin(5*t)
    z=np.zeros(x.shape)    
    xyz=np.vstack((x,y,z)).T
    xyza=pf.approx_polygon_track(xyz)
    assert_equal(len(xyza), 27)
def test_approx_mdl_traj():
    
    t=np.linspace(0,1.75*2*np.pi,100)
    x = np.sin(t)
    y = np.cos(t)
    z = t    
    xyz=np.vstack((x,y,z)).T     
    xyza1 = pf.approximate_mdl_trajectory(xyz,alpha=1.)
    xyza2 = pf.approximate_mdl_trajectory(xyz,alpha=2.)    
    assert_equal(len(xyza1), 10)
    assert_equal(len(xyza2), 8)
    assert_array_almost_equal( xyza1, np.array([[  0.00000000e+00,   1.00000000e+00,   0.00000000e+00],
       [  9.39692621e-01,   3.42020143e-01,   1.22173048e+00],
       [  6.42787610e-01,  -7.66044443e-01,   2.44346095e+00],
       [ -5.00000000e-01,  -8.66025404e-01,   3.66519143e+00],
       [ -9.84807753e-01,   1.73648178e-01,   4.88692191e+00],
       [ -1.73648178e-01,   9.84807753e-01,   6.10865238e+00],
       [  8.66025404e-01,   5.00000000e-01,   7.33038286e+00],
       [  7.66044443e-01,  -6.42787610e-01,   8.55211333e+00],
       [ -3.42020143e-01,  -9.39692621e-01,   9.77384381e+00],
       [ -1.00000000e+00,  -4.28626380e-16,   1.09955743e+01]]))
    
    assert_array_almost_equal(xyza2, np.array([[  0.00000000e+00,   1.00000000e+00,   0.00000000e+00],
       [  9.95471923e-01,  -9.50560433e-02,   1.66599610e+00],
       [ -1.89251244e-01,  -9.81928697e-01,   3.33199221e+00],
       [ -9.59492974e-01,   2.81732557e-01,   4.99798831e+00],
       [  3.71662456e-01,   9.28367933e-01,   6.66398442e+00],
       [  8.88835449e-01,  -4.58226522e-01,   8.32998052e+00],
       [ -5.40640817e-01,  -8.41253533e-01,   9.99597663e+00],
       [ -1.00000000e+00,  -4.28626380e-16,   1.09955743e+01]]))
    
    
    
def test_point_track_sq_distance():
    
    t=np.array([[0,0,0],[1,1,1],[2,2,2]],dtype='f4')
    p=np.array([-1,-1.,-1],dtype='f4')
    assert_equal( pf.point_track_sq_distance_check(t,p,.2**2), False)    
    assert_equal( pf.point_track_sq_distance_check(t,p,2**2), True)
    t=np.array([[0,0,0],[1,0,0],[2,2,0]],dtype='f4')
    p=np.array([.5,0,0],dtype='f4')
    assert_equal( pf.point_track_sq_distance_check(t,p,.2**2), True)
    p=np.array([.5,1,0],dtype='f4')
    assert_equal( pf.point_track_sq_distance_check(t,p,.2**2), False)
    
def test_track_roi_intersection_check():    
    roi=np.array([[0,0,0],[1,0,0],[2,0,0]],dtype='f4')    
    t=np.array([[0,0,0],[1,1,1],[2,2,2]],dtype='f4')
    assert_equal( pf.track_roi_intersection_check(t,roi,1), True)
    t=np.array([[0,0,0],[1,0,0],[2,2,2]],dtype='f4')
    assert_equal(pf.track_roi_intersection_check(t,roi,1), True)
    t=np.array([[1,1,0],[1,0,0],[1,-1,0]],dtype='f4')
    assert_equal( pf.track_roi_intersection_check(t,roi,1), True)    
    t=np.array([[4,0,0],[4,1,1],[4,2,0]],dtype='f4')
    assert_equal(pf.track_roi_intersection_check(t,roi,1), False)
    
    
def test_minimum_distance():    
    xyz1=np.array([[1,0,0],[2,0,0]],dtype='float32')
    xyz2=np.array([[3,0,0],[4,0,0]],dtype='float32')
    assert_equal(pf.minimum_closest_distance(xyz1,xyz2), 1.0)
    
    
def test_most_similar_mam():
    xyz1 = np.array([[0,0,0],[1,0,0],[2,0,0],[3,0,0]],dtype='float32')
    xyz2 = np.array([[0,1,1],[1,0,1],[2,3,-2]],dtype='float32')
    xyz3 = np.array([[-1,0,0],[2,0,0],[2,3,0],[3,0,0]],dtype='float32')
    tracks=[xyz1,xyz2,xyz3]
    for metric in ('avg', 'min', 'max'):        
        #pf should be much faster and the results equivalent
        si2,s2=pf.most_similar_track_mam(tracks,metric=metric)       
    
    
def test_cut_plane():
    dt = np.dtype(np.float32)
    refx = np.array([[0,0,0],[1,0,0],[2,0,0],[3,0,0]],dtype=dt)
    bundlex = [np.array([[0.5,1,0],[1.5,2,0],[2.5,3,0]],dtype=dt), 
               np.array([[0.5,2,0],[1.5,3,0],[2.5,4,0]],dtype=dt),
               np.array([[0.5,1,1],[1.5,2,2],[2.5,3,3]],dtype=dt),
               np.array([[-0.5,2,-1],[-1.5,3,-2],[-2.5,4,-3]],dtype=dt)]
    expected_hit0 = [
        [ 1.        ,  1.5       ,  0.        ,  0.70710683,  0.        ],
        [ 1.        ,  2.5       ,  0.        ,  0.70710677,  1.        ],
        [ 1.        ,  1.5       ,  1.5       ,  0.81649661,  2.        ]]
    expected_hit1 = [
        [ 2.        ,  2.5       ,  0.        ,  0.70710677,  0.        ],
        [ 2.        ,  3.5       ,  0.        ,  0.70710677,  1.        ],
        [ 2.        ,  2.5       ,  2.5       ,  0.81649655,  2.        ]]
    hitx=pf.cut_plane(bundlex,refx)
    assert_array_almost_equal(hitx[0], expected_hit0)
    assert_array_almost_equal(hitx[1], expected_hit1)
    # check that algorithm allows types other than float32
    bundlex[0] = np.asarray(bundlex[0], dtype=np.float64)
    hitx=pf.cut_plane(bundlex,refx)
    assert_array_almost_equal(hitx[0], expected_hit0)
    assert_array_almost_equal(hitx[1], expected_hit1)
    refx = np.asarray(refx, dtype=np.float64)
    hitx=pf.cut_plane(bundlex,refx)
    assert_array_almost_equal( hitx[0], expected_hit0)
    assert_array_almost_equal( hitx[1], expected_hit1)
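# --- illustration (not part of the original tests) ---------------------------
# A hedged, pure-numpy restatement of the brute-force reference computed in
# test_bundles_distances_mdf above: for two tracks with the same number of
# points, the MDF distance is the mean point-wise Euclidean distance, taken
# for both the direct and the flipped orientation, keeping the smaller one.
def _reference_mdf(ta, tb):
    direct = np.mean(np.sqrt(np.sum((ta - tb) ** 2, axis=1)))
    flipped = np.mean(np.sqrt(np.sum((ta - tb[::-1]) ** 2, axis=1)))
    return min(direct, flipped)
# e.g. _reference_mdf(np.zeros((3, 3)), np.ones((3, 3))) ~ 1.732 (sqrt(3))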
 | |
| 
	from pyglet.gl import *
from pyglet import font
from plot_object import PlotObject
from util import strided_range, billboard_matrix
from util import get_direction_vectors
from util import dot_product, vec_sub, vec_mag
from sympy.core.basic import S
class PlotAxes(PlotObject):
    def __init__(self, *args, **kwargs):
        # initialize style parameter
        style = kwargs.pop('style', '').lower()
        # allow alias kwargs to override style kwarg
        if kwargs.pop('none', None) is not None: style = 'none'
        if kwargs.pop('frame', None) is not None: style = 'frame'
        if kwargs.pop('box', None) is not None: style = 'box'
        if kwargs.pop('ordinate', None) is not None: style = 'ordinate'
        if style in ['', 'ordinate']:
            self._render_object = PlotAxesOrdinate(self)
        elif style in ['frame', 'box']:
            self._render_object = PlotAxesFrame(self)
        elif style in ['none']:
            self._render_object = None
        else: raise ValueError(("Unrecognized axes "
                                "style %s.") % (style))
        # initialize stride parameter
        stride = kwargs.pop('stride', 0.25)
        try: stride = eval(stride)
        except: pass
        if isinstance(stride, (list, tuple)):
            assert len(stride) == 3
            self._stride = stride
        else:
            self._stride = [stride, stride, stride]
        self._tick_length = float(kwargs.pop('tick_length', 0.1))
        # setup bounding box and ticks
        self._origin = [0,0,0]
        self.reset_bounding_box()
        def flexible_boolean(input, default):
            if input in [True, False]:
                return input
            if input in ['f','F','false','False']: return False
            if input in ['t','T','true','True']: return True
            return default
        # initialize remaining parameters
        self.visible      =  flexible_boolean(kwargs.pop('visible',''), True)
        self._overlay     =  flexible_boolean(kwargs.pop('overlay',''), True)
        self._colored     =  flexible_boolean(kwargs.pop('colored',''), False)
        self._label_axes  =  flexible_boolean(kwargs.pop('label_axes', ''), False)
        self._label_ticks =  flexible_boolean(kwargs.pop('label_ticks', ''), True)
        # setup label font
        self.font_face = kwargs.pop('font_face', 'Arial')
        self.font_size = kwargs.pop('font_size', 28)
        # this is also used to reinit the
        # font on window close/reopen
        self.reset_resources()
    def reset_resources(self):
        self.label_font = None
    def reset_bounding_box(self):
        self._bounding_box = [[None,None], [None,None], [None,None]]
        self._axis_ticks = [[],[],[]]
    def draw(self):
        if self._render_object:
            glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT | GL_DEPTH_BUFFER_BIT)
            if self._overlay: glDisable(GL_DEPTH_TEST)
            self._render_object.draw()
            glPopAttrib()
    def adjust_bounds(self, child_bounds):
        b = self._bounding_box
        c = child_bounds
        for i in [0,1,2]:
            if abs(c[i][0]) is S.Infinity or abs(c[i][1]) is S.Infinity: continue
            b[i][0] = [ min([b[i][0], c[i][0]]), c[i][0] ][ b[i][0] is None ]
            b[i][1] = [ max([b[i][1], c[i][1]]), c[i][1] ][ b[i][1] is None ]
            self._recalculate_axis_ticks(i)
    def _recalculate_axis_ticks(self, axis):
        b = self._bounding_box
        if b[axis][0] is None or b[axis][1] is None:
            self._axis_ticks[axis] = []
        else:
            self._axis_ticks[axis] = strided_range(b[axis][0], b[axis][1], self._stride[axis])
    def toggle_visible(self):
        self.visible = not self.visible
    def toggle_colors(self):
        self._colored = not self._colored
class PlotAxesBase(PlotObject):
    def __init__(self, parent_axes):
        self._p = parent_axes
    def draw(self):
        color = [  ([0.2,0.1,0.3], [0.2,0.1,0.3], [0.2,0.1,0.3]),
                   ([0.9,0.3,0.5], [0.5,1.0,0.5], [0.3,0.3,0.9])  ][ self._p._colored ]
        self.draw_background(color)
        self.draw_axis(2, color[2])
        self.draw_axis(1, color[1])
        self.draw_axis(0, color[0])
    def draw_background(self, color):
        pass # optional
    def draw_axis(self, axis, color):
        raise NotImplementedError()
    def draw_text(self, text, position, color, scale=1.0):
        if len(color) == 3: color = (color[0], color[1], color[2], 1.0)
        if self._p.label_font is None:
            self._p.label_font = font.load(self._p.font_face,
                                           self._p.font_size,
                                           bold=True, italic=False)
        label = font.Text(self._p.label_font, text,
                          color=color,
                          valign=font.Text.BASELINE,
                          halign=font.Text.CENTER)
        glPushMatrix()
        glTranslatef(*position)
        billboard_matrix()
        scale_factor = 0.005*scale
        glScalef(scale_factor, scale_factor, scale_factor)
        glColor4f(0,0,0,0)
        label.draw()
        glPopMatrix()
    def draw_line(self, v, color):
        o = self._p._origin
        glBegin(GL_LINES)
        glColor3f(*color)
        glVertex3f(v[0][0] + o[0], v[0][1] + o[1], v[0][2] + o[2])
        glVertex3f(v[1][0] + o[0], v[1][1] + o[1], v[1][2] + o[2])
        glEnd()
class PlotAxesOrdinate(PlotAxesBase):
    def __init__(self, parent_axes):
        super(PlotAxesOrdinate, self).__init__(parent_axes)
    def draw_axis(self, axis, color):
        ticks = self._p._axis_ticks[axis]
        radius = self._p._tick_length / 2.0
        if len(ticks) < 2: return
        # calculate the vector for this axis
        axis_lines = [[0,0,0], [0,0,0]]
        axis_lines[0][axis], axis_lines[1][axis] = ticks[0], ticks[-1]
        axis_vector = vec_sub( axis_lines[1], axis_lines[0] )
        # calculate angle to the z direction vector
        pos_z = get_direction_vectors()[2]
        d = abs( dot_product(axis_vector, pos_z) )
        d = d / vec_mag(axis_vector)
        # don't draw labels if we're looking down the axis
        labels_visible = abs(d - 1.0) > 0.02
        # draw the ticks and labels
        for tick in ticks:
            self.draw_tick_line(axis, color, radius, tick, labels_visible)
        # draw the axis line and labels
        self.draw_axis_line(axis, color, ticks[0], ticks[-1], labels_visible)
    def draw_axis_line(self, axis, color, a_min, a_max, labels_visible):
        axis_line = [[0,0,0], [0,0,0]]
        axis_line[0][axis], axis_line[1][axis] = a_min, a_max
        self.draw_line(axis_line, color)
        if labels_visible: self.draw_axis_line_labels(axis, color, axis_line)
    def draw_axis_line_labels(self, axis, color, axis_line):
        if not self._p._label_axes: return
        axis_labels = [axis_line[0][::], axis_line[1][::]]
        axis_labels[0][axis] -= 0.3
        axis_labels[1][axis] += 0.3
        a_str = ['X', 'Y', 'Z'][axis]
        self.draw_text("-" + a_str, axis_labels[0], color)
        self.draw_text("+" + a_str, axis_labels[1], color)
    def draw_tick_line(self, axis, color, radius, tick, labels_visible):
        tick_axis = {0: 1, 1: 0, 2: 1}[axis]
        tick_line = [[0,0,0], [0,0,0]]
        tick_line[0][axis] = tick_line[1][axis] = tick
        tick_line[0][tick_axis], tick_line[1][tick_axis] = -radius, radius
        self.draw_line(tick_line, color)
        if labels_visible: self.draw_tick_line_label(axis, color, radius, tick)
    def draw_tick_line_label(self, axis, color, radius, tick):
        if not self._p._label_axes: return
        tick_label_vector = [0,0,0]
        tick_label_vector[axis] = tick
        tick_label_vector[{0: 1, 1: 0, 2: 1}[axis]] = [-1,1,1][axis]*radius*3.5
        self.draw_text(str(tick), tick_label_vector, color, scale=0.5)
class PlotAxesFrame(PlotAxesBase):
    def __init__(self, parent_axes):
        super(PlotAxesFrame, self).__init__(parent_axes)
    def draw_background(self, color):
        pass
    def draw_axis(self, axis, color):
        raise NotImplementedError()
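# --- usage sketch (not part of the original module) ---------------------------
# A hedged illustration of the keyword arguments parsed in PlotAxes.__init__
# above; the particular values shown are assumptions for demonstration only.
def _example_axes():
    return PlotAxes(style='ordinate',      # '', 'ordinate', 'frame', 'box', 'none'
                    stride=0.5,            # scalar or (sx, sy, sz)
                    tick_length=0.1,
                    overlay='true', colored='false',
                    label_axes='false', label_ticks='true',
                    font_face='Arial', font_size=28)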
 | |
| 
	# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
import functools
import inspect
from nova.db import base
from nova import hooks
from nova.i18n import _
from nova.network import model as network_model
from nova import objects
from nova.openstack.common import excutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@hooks.add_hook('instance_network_info')
def update_instance_cache_with_nw_info(impl, context, instance,
                                       nw_info=None, update_cells=True):
    try:
        if not isinstance(nw_info, network_model.NetworkInfo):
            nw_info = None
        if nw_info is None:
            nw_info = impl._get_instance_nw_info(context, instance)
        LOG.debug('Updating cache with info: %s', nw_info)
        # NOTE(comstud): The save() method actually handles updating or
        # creating the instance.  We don't need to retrieve the object
        # from the DB first.
        ic = objects.InstanceInfoCache.new(context, instance['uuid'])
        ic.network_info = nw_info
        ic.save(update_cells=update_cells)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_('Failed storing info cache'), instance=instance)
def refresh_cache(f):
    """Decorator to update the instance_info_cache
    Requires context and instance as function args
    """
    argspec = inspect.getargspec(f)
    @functools.wraps(f)
    def wrapper(self, context, *args, **kwargs):
        res = f(self, context, *args, **kwargs)
        try:
            # get the instance from arguments (or raise ValueError)
            instance = kwargs.get('instance')
            if not instance:
                instance = args[argspec.args.index('instance') - 2]
        except ValueError:
            msg = _('instance is a required argument to use @refresh_cache')
            raise Exception(msg)
        with lockutils.lock('refresh_cache-%s' % instance['uuid']):
            update_instance_cache_with_nw_info(self, context, instance,
                                               nw_info=res)
        # return the original function's return value
        return res
    return wrapper
SENTINEL = object()
class NetworkAPI(base.Base):
    """Base Network API for doing networking operations.
    New operations available on specific clients must be added here as well.
    """
    def __init__(self, **kwargs):
        super(NetworkAPI, self).__init__(**kwargs)
    def get_all(self, context):
        """Get all the networks for client."""
        raise NotImplementedError()
    def get(self, context, network_uuid):
        """Get specific network for client."""
        raise NotImplementedError()
    def create(self, context, **kwargs):
        """Create a network."""
        raise NotImplementedError()
    def delete(self, context, network_uuid):
        """Delete a specific network."""
        raise NotImplementedError()
    def disassociate(self, context, network_uuid):
        """Disassociate a network for client."""
        raise NotImplementedError()
    def get_fixed_ip(self, context, id):
        """Get fixed ip by id."""
        raise NotImplementedError()
    def get_fixed_ip_by_address(self, context, address):
        """Get fixed ip by address."""
        raise NotImplementedError()
    def get_floating_ip(self, context, id):
        """Get floating ip by id."""
        raise NotImplementedError()
    def get_floating_ip_pools(self, context):
        """Get floating ip pools."""
        raise NotImplementedError()
    def get_floating_ip_by_address(self, context, address):
        """Get floating ip by address."""
        raise NotImplementedError()
    def get_floating_ips_by_project(self, context):
        """Get floating ips by project."""
        raise NotImplementedError()
    def get_floating_ips_by_fixed_address(self, context, fixed_address):
        """Get floating ips by fixed address."""
        raise NotImplementedError()
    def get_instance_id_by_floating_address(self, context, address):
        """Get instance id by floating address."""
        raise NotImplementedError()
    def get_vifs_by_instance(self, context, instance):
        """Get vifs by instance."""
        raise NotImplementedError()
    def get_vif_by_mac_address(self, context, mac_address):
        """Get vif mac address."""
        raise NotImplementedError()
    def allocate_floating_ip(self, context, pool=None):
        """Adds (allocate) floating ip to a project from a pool."""
        raise NotImplementedError()
    def release_floating_ip(self, context, address,
                            affect_auto_assigned=False):
        """Removes (deallocates) a floating ip with address from a project."""
        raise NotImplementedError()
    def disassociate_and_release_floating_ip(self, context, instance,
                                           floating_ip):
        """Removes (deallocates) and deletes the floating ip."""
        raise NotImplementedError()
    def associate_floating_ip(self, context, instance,
                              floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Associates a floating ip with a fixed ip."""
        raise NotImplementedError()
    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Disassociates a floating ip from fixed ip it is associated with."""
        raise NotImplementedError()
    def allocate_for_instance(self, context, instance, vpn,
                              requested_networks, macs=None,
                              security_groups=None,
                              dhcp_options=None):
        """Allocates all network structures for an instance.
        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param vpn: A boolean, if True, indicates a vpn to access the instance.
        :param requested_networks: A dictionary of requested_networks,
            Optional value containing network_id, fixed_ip, and port_id.
        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
        :param security_groups: None or security groups to allocate for
            instance.
        :param dhcp_options: None or a set of key/value pairs that should
            determine the DHCP BOOTP response, eg. for PXE booting an instance
            configured with the baremetal hypervisor. It is expected that these
            are already formatted for the neutron v2 api.
            See nova/virt/driver.py:dhcp_options_for_instance for an example.
        :returns: network info as from get_instance_nw_info() below
        """
        raise NotImplementedError()
    def deallocate_for_instance(self, context, instance,
                                requested_networks=None):
        """Deallocates all network structures related to instance."""
        raise NotImplementedError()
    def allocate_port_for_instance(self, context, instance, port_id,
                                   network_id=None, requested_ip=None):
        """Allocate port for instance."""
        raise NotImplementedError()
    def deallocate_port_for_instance(self, context, instance, port_id):
        """Deallocate port for instance."""
        raise NotImplementedError()
    def list_ports(self, *args, **kwargs):
        """List ports."""
        raise NotImplementedError()
    def show_port(self, *args, **kwargs):
        """Show specific port."""
        raise NotImplementedError()
    def add_fixed_ip_to_instance(self, context, instance, network_id):
        """Adds a fixed ip to instance from specified network."""
        raise NotImplementedError()
    def remove_fixed_ip_from_instance(self, context, instance, address):
        """Removes a fixed ip from instance from specified network."""
        raise NotImplementedError()
    def add_network_to_project(self, context, project_id, network_uuid=None):
        """Force adds another network to a project."""
        raise NotImplementedError()
    def associate(self, context, network_uuid, host=SENTINEL,
                  project=SENTINEL):
        """Associate or disassociate host or project to network."""
        raise NotImplementedError()
    def get_instance_nw_info(self, context, instance, **kwargs):
        """Returns all network info related to an instance."""
        if isinstance(instance, dict):
            instance_uuid = instance['uuid']
        else:
            instance_uuid = instance.uuid
        with lockutils.lock('refresh_cache-%s' % instance_uuid):
            # NOTE(danms): Several places in the code look up instances without
            # pulling system_metadata for performance, and call this function.
            # If we get an instance without it, re-fetch so that the call
            # to network_api (which requires it for instance_type) will
            # succeed.
            attrs = ['system_metadata']
            use_slave = kwargs.get('use_slave', False)
            # NOTE(Rui Chen): Refresh instance in order to avoid race
            # condition between booting/attaching_interface and
            # nova/neutron event reporting mechanism. See details:
            # https://bugs.launchpad.net/nova/+bug/1407664
            instance = objects.Instance.get_by_uuid(context,
                                                    instance_uuid,
                                                    expected_attrs=attrs,
                                                    use_slave=use_slave)
            result = self._get_instance_nw_info(context, instance, **kwargs)
            # NOTE(comstud): Don't update API cell with new info_cache every
            # time we pull network info for an instance.  The periodic healing
            # of info_cache causes too many cells messages.  Healing the API
            # will happen separately.
            update_instance_cache_with_nw_info(self, context, instance,
                                               nw_info=result,
                                               update_cells=False)
        return result
    def _get_instance_nw_info(self, context, instance, **kwargs):
        raise NotImplementedError()
    def create_pci_requests_for_sriov_ports(self, context,
                                            pci_requests,
                                            requested_networks):
        """Check requested networks for any SR-IOV port request.
        Create a PCI request object for each SR-IOV port, and add it to the
        pci_requests object that contains a list of PCI request object.
        """
        raise NotImplementedError()
    def validate_networks(self, context, requested_networks, num_instances):
        """validate the networks passed at the time of creating
        the server.
        Return the number of instances that can be successfully allocated
        with the requested network configuration.
        """
        raise NotImplementedError()
    def get_instance_uuids_by_ip_filter(self, context, filters):
        """Returns a list of dicts in the form of
        {'instance_uuid': uuid, 'ip': ip} that matched the ip_filter
        """
        raise NotImplementedError()
    def get_dns_domains(self, context):
        """Returns a list of available dns domains.
        These can be used to create DNS entries for floating ips.
        """
        raise NotImplementedError()
    def add_dns_entry(self, context, address, name, dns_type, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()
    def modify_dns_entry(self, context, name, address, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()
    def delete_dns_entry(self, context, name, domain):
        """Delete the specified dns entry."""
        raise NotImplementedError()
    def delete_dns_domain(self, context, domain):
        """Delete the specified dns domain."""
        raise NotImplementedError()
    def get_dns_entries_by_address(self, context, address, domain):
        """Get entries for address and domain."""
        raise NotImplementedError()
    def get_dns_entries_by_name(self, context, name, domain):
        """Get entries for name and domain."""
        raise NotImplementedError()
    def create_private_dns_domain(self, context, domain, availability_zone):
        """Create a private DNS domain with nova availability zone."""
        raise NotImplementedError()
    def create_public_dns_domain(self, context, domain, project=None):
        """Create a public DNS domain with optional nova project."""
        raise NotImplementedError()
    def setup_networks_on_host(self, context, instance, host=None,
                                                        teardown=False):
        """Setup or teardown the network structures on hosts related to
           instance.
        """
        raise NotImplementedError()
    def migrate_instance_start(self, context, instance, migration):
        """Start to migrate the network of an instance."""
        raise NotImplementedError()
    def migrate_instance_finish(self, context, instance, migration):
        """Finish migrating the network of an instance."""
        raise NotImplementedError()
    def setup_instance_network_on_host(self, context, instance, host):
        """Setup network for specified instance on host.
        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param host: The host on which the network should be set up for the instance.
        """
        raise NotImplementedError()
    def cleanup_instance_network_on_host(self, context, instance, host):
        """Cleanup network for specified instance on host.
        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param host: The host on which the network should be cleaned up for the instance.
        """
        raise NotImplementedError()
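# --- illustration (not part of nova) ------------------------------------------
# A hedged sketch of how a concrete network client is expected to plug into
# this base class: implement _get_instance_nw_info() and decorate methods that
# change an instance's networking with @refresh_cache, so their return value is
# stored in the instance info cache under the per-instance lock. The class
# below is a placeholder for illustration only, not real nova code.
class _ExampleNetworkAPI(NetworkAPI):
    def _get_instance_nw_info(self, context, instance, **kwargs):
        # placeholder: a real client would query its backend here
        return network_model.NetworkInfo()

    @refresh_cache
    def add_fixed_ip_to_instance(self, context, instance, network_id):
        # placeholder body; @refresh_cache caches the returned nw_info
        return self._get_instance_nw_info(context, instance)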
 | |
| 
	"""
Support for Yamaha Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.yamaha/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
    SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
    SUPPORT_SELECT_SOURCE, SUPPORT_PLAY_MEDIA, SUPPORT_PAUSE, SUPPORT_STOP,
    SUPPORT_NEXT_TRACK, SUPPORT_PREVIOUS_TRACK, SUPPORT_PLAY,
    MEDIA_TYPE_MUSIC,
    MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_NAME, CONF_HOST, STATE_OFF, STATE_ON,
                                 STATE_PLAYING, STATE_IDLE)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['rxv==0.4.0']
_LOGGER = logging.getLogger(__name__)
SUPPORT_YAMAHA = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
    SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
CONF_SOURCE_NAMES = 'source_names'
CONF_SOURCE_IGNORE = 'source_ignore'
CONF_ZONE_IGNORE = 'zone_ignore'
DEFAULT_NAME = 'Yamaha Receiver'
KNOWN = 'yamaha_known_receivers'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_HOST): cv.string,
    vol.Optional(CONF_SOURCE_IGNORE, default=[]):
        vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_ZONE_IGNORE, default=[]):
        vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_SOURCE_NAMES, default={}): {cv.string: cv.string},
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Yamaha platform."""
    import rxv
    # keep track of configured receivers so that we don't end up
    # discovering a receiver dynamically that we have static config
    # for.
    if hass.data.get(KNOWN, None) is None:
        hass.data[KNOWN] = set()
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    source_ignore = config.get(CONF_SOURCE_IGNORE)
    source_names = config.get(CONF_SOURCE_NAMES)
    zone_ignore = config.get(CONF_ZONE_IGNORE)
    if discovery_info is not None:
        name = discovery_info.get('name')
        model = discovery_info.get('model_name')
        ctrl_url = discovery_info.get('control_url')
        desc_url = discovery_info.get('description_url')
        if ctrl_url in hass.data[KNOWN]:
            _LOGGER.info("%s already manually configured", ctrl_url)
            return
        receivers = rxv.RXV(
            ctrl_url, model_name=model, friendly_name=name,
            unit_desc_url=desc_url).zone_controllers()
        _LOGGER.info("Receivers: %s", receivers)
        # when we are dynamically discovered config is empty
        zone_ignore = []
    elif host is None:
        receivers = []
        for recv in rxv.find():
            receivers.extend(recv.zone_controllers())
    else:
        ctrl_url = "http://{}:80/YamahaRemoteControl/ctrl".format(host)
        receivers = rxv.RXV(ctrl_url, name).zone_controllers()
    for receiver in receivers:
        if receiver.zone not in zone_ignore:
            hass.data[KNOWN].add(receiver.ctrl_url)
            add_devices([
                YamahaDevice(name, receiver, source_ignore, source_names)
            ], True)
class YamahaDevice(MediaPlayerDevice):
    """Representation of a Yamaha device."""
    def __init__(self, name, receiver, source_ignore, source_names):
        """Initialize the Yamaha Receiver."""
        self._receiver = receiver
        self._muted = False
        self._volume = 0
        self._pwstate = STATE_OFF
        self._current_source = None
        self._source_list = None
        self._source_ignore = source_ignore or []
        self._source_names = source_names or {}
        self._reverse_mapping = None
        self._playback_support = None
        self._is_playback_supported = False
        self._play_status = None
        self._name = name
        self._zone = receiver.zone
    def update(self):
        """Get the latest details from the device."""
        self._play_status = self._receiver.play_status()
        if self._receiver.on:
            if self._play_status is None:
                self._pwstate = STATE_ON
            elif self._play_status.playing:
                self._pwstate = STATE_PLAYING
            else:
                self._pwstate = STATE_IDLE
        else:
            self._pwstate = STATE_OFF
        self._muted = self._receiver.mute
        self._volume = (self._receiver.volume / 100) + 1
        if self.source_list is None:
            self.build_source_list()
        current_source = self._receiver.input
        self._current_source = self._source_names.get(
            current_source, current_source)
        self._playback_support = self._receiver.get_playback_support()
        self._is_playback_supported = self._receiver.is_playback_supported(
            self._current_source)
    def build_source_list(self):
        """Build the source list."""
        self._reverse_mapping = {alias: source for source, alias in
                                 self._source_names.items()}
        self._source_list = sorted(
            self._source_names.get(source, source) for source in
            self._receiver.inputs()
            if source not in self._source_ignore)
    @property
    def name(self):
        """Return the name of the device."""
        name = self._name
        if self._zone != "Main_Zone":
            # Zone will be one of Main_Zone, Zone_2, Zone_3
            name += " " + self._zone.replace('_', ' ')
        return name
    @property
    def state(self):
        """Return the state of the device."""
        return self._pwstate
    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        return self._volume
    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._muted
    @property
    def source(self):
        """Return the current input source."""
        return self._current_source
    @property
    def source_list(self):
        """List of available input sources."""
        return self._source_list
    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        supported_features = SUPPORT_YAMAHA
        supports = self._playback_support
        mapping = {'play': (SUPPORT_PLAY | SUPPORT_PLAY_MEDIA),
                   'pause': SUPPORT_PAUSE,
                   'stop': SUPPORT_STOP,
                   'skip_f': SUPPORT_NEXT_TRACK,
                   'skip_r': SUPPORT_PREVIOUS_TRACK}
        for attr, feature in mapping.items():
            if getattr(supports, attr, False):
                supported_features |= feature
        return supported_features
    def turn_off(self):
        """Turn off media player."""
        self._receiver.on = False
    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        receiver_vol = 100 - (volume * 100)
        negative_receiver_vol = -receiver_vol
        self._receiver.volume = negative_receiver_vol
    def mute_volume(self, mute):
        """Mute (true) or unmute (false) media player."""
        self._receiver.mute = mute
    def turn_on(self):
        """Turn the media player on."""
        self._receiver.on = True
        self._volume = (self._receiver.volume / 100) + 1
    def media_play(self):
        """Send play commmand."""
        self._call_playback_function(self._receiver.play, "play")
    def media_pause(self):
        """Send pause command."""
        self._call_playback_function(self._receiver.pause, "pause")
    def media_stop(self):
        """Send stop command."""
        self._call_playback_function(self._receiver.stop, "stop")
    def media_previous_track(self):
        """Send previous track command."""
        self._call_playback_function(self._receiver.previous, "previous track")
    def media_next_track(self):
        """Send next track command."""
        self._call_playback_function(self._receiver.next, "next track")
    def _call_playback_function(self, function, function_text):
        import rxv
        try:
            function()
        except rxv.exceptions.ResponseException:
            _LOGGER.warning(
                "Failed to execute %s on %s", function_text, self._name)
    def select_source(self, source):
        """Select input source."""
        self._receiver.input = self._reverse_mapping.get(source, source)
    def play_media(self, media_type, media_id, **kwargs):
        """Play media from an ID.
        This exposes a pass through for various input sources in the
        Yamaha to direct play certain kinds of media. media_type is
        treated as the input type that we are setting, and media id is
        specific to it.
        """
        if media_type == "NET RADIO":
            self._receiver.net_radio(media_id)
    @property
    def media_artist(self):
        """Artist of current playing media."""
        if self._play_status is not None:
            return self._play_status.artist
    @property
    def media_album_name(self):
        """Album of current playing media."""
        if self._play_status is not None:
            return self._play_status.album
    @property
    def media_content_type(self):
        """Content type of current playing media."""
        # Loose assumption that if playback is supported, we are playing music
        if self._is_playback_supported:
            return MEDIA_TYPE_MUSIC
        return None
    @property
    def media_title(self):
        """Artist of current playing media."""
        if self._play_status is not None:
            song = self._play_status.song
            station = self._play_status.station
            # If both song and station are available, show both; otherwise
            # just the one we have.
            if song and station:
                return '{}: {}'.format(station, song)
            return song or station
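# --- configuration sketch (not part of the original platform) -----------------
# A hedged example of the configuration dict validated by PLATFORM_SCHEMA
# above, expressed as the YAML-equivalent Python dict. The host address and
# the source alias are placeholders, not values from the original file.
_EXAMPLE_CONFIG = {
    'platform': 'yamaha',
    CONF_NAME: 'Yamaha Receiver',
    CONF_HOST: '192.168.1.2',
    CONF_SOURCE_IGNORE: ['AUX'],
    CONF_ZONE_IGNORE: ['Zone_2'],
    CONF_SOURCE_NAMES: {'HDMI1': 'ChromeCast'},
}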
 | |
| 
	#!/usr/bin/python
#  This library is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public
#  License as published by the Free Software Foundation; either
#  version 3.0 of the License, or (at your option) any later version.
#
#  The library is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#  General Public License for more details.
#
# (c) Sam Burden, Berkeley 2012 
import numpy as np
import pylab as plt
import os
from glob import glob
np.set_printoptions(precision=2)
dbg = False
# optimization libraries
try:
  import scipy as sp
except ImportError:
  if dbg:
    print 'WARNING: scipy not found'
  sp = None
try:
  import nlopt
except ImportError:
  if dbg:
    print 'WARNING: nlopt not found'
  nlopt = None
try:
  import openopt as oo
except ImportError:
  if dbg:
    print 'WARNING: openopt not found'
  oo = None
try:
  import optutil as ou
except ImportError:
  if dbg:
    print 'WARNING: optutil not found'
  ou = None
# default environment for evaluating parameter files
env = {'np':np,'array':np.array,
       'scipy':sp,
       'nlopt':nlopt,
       'openopt':oo,
       'optutil':ou,
       '__builtins__':__builtins__}
def bd(x,xm,xM,dbg=True):
  """
  Project x to keep it within bounds xm,xM
  Inputs:
    x - array - state
    xm,xM - array - elementwise min,max on x
  Outputs:
    x_ - array - xm < x < xM
  by Sam Burden 2012
  """
  x_ = np.asarray(x,dtype=np.float).copy(); 
  xm = np.asarray(xm,dtype=np.float); 
  xM = np.asarray(xM,dtype=np.float);
  jm = (x_ < xm).nonzero()[0]
  if jm.size > 0:
    if dbg:
      print 'WARNING: x%s < xm%s' % (list(jm),list(jm))
    x_[jm] = xm[jm]
  jM = (x_ > xM).nonzero()[0]
  if jM.size > 0:
    if dbg:
      print 'WARNING: x%s > xM%s' % (list(jM),list(jM))
    x_[jM] = xM[jM]
  return x_
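# A hedged usage note (not in the original file): bd() clips elementwise, e.g.
#   bd([-1., 5., 12.], [0., 0., 0.], [10., 10., 10.], dbg=False)
#   -> array([  0.,   5.,  10.])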
class Opt(object):
  """
  Optimization superclass
  Usage:
    opt = Opt('pars.py') # pars.py specifies optimization parameters
    while opt.cost(opt.x) > 1e-6:
      # update opt.x, then push the new values back into the parameter dict
      opt.unpack(opt.x)
      print opt
  """
  def __init__( self,pth='' ):
    """
    Opt(pth)  creates optimization instance in directory / file given by pth
    Inputs:
      pth - str - parameter .py file or directory containing only one .py file
    """
    self.p = {}
    if pth and os.path.exists(pth):
      # sanitize filename
      if os.path.isdir(pth):
        fis = glob( os.path.join(pth,'*.py') )
        assert len(fis) == 1 # only one .py in directory ?
        fi = fis[0]
      else:
        fi = pth
      self.di,self.fi = os.path.split(fi)
      self.pars(os.path.join(self.di, self.fi))
  def pars( self,fi='',p0=env,**p ):
    """
    pars(fi)  reads parameters from .py file fi
    pars(**p) reads parameters from keyword arguments
    Inputs:
      (optional)
      fi - str - parameter .py file
      p0 - dict - initial parameter environment to use when exec'ing fi 
      **p - keywords - additional parameters specified as keyword arguments 
    Outputs:
      p - dict - current optimization params
    Effects:
      - exec's fi in self.p environment
      - runs self.sanitize on self.p
      - runs self.pack on self.p
    """
    if fi or p:
      self.p.update(p0)
      self.p.update(p)
      if fi:
        code = compile(open(fi).read(),fi,'exec')
        exec code in self.p
      self.p = self.sanitize(self.p,rm=p0.keys())
      if 'opt' in self.p.keys() and self.p['opt']:
        self.vars = self.p['opt']['vars']
        if 'cost' in self.p['opt']:
          self.cost = self.p['opt']['cost']
        self.pack(self.p)
    if hasattr(self, 'x') and hasattr(self, 'vars'):
      return dict([(var,(self.x[self.j[i]:self.j[i+1]])) 
                   for i,var in enumerate(self.vars)])
      
  def sanitize( self,p,rm={} ):
    """
    sanitize(p)  adds and/or resizes _m,_M,_s fields for each var
    Inputs:
      p - parameter dict
      (optional)
      rm - parameters to remove
    Outputs:
      p - sanitized parameter dict
    """
    # remove some keys
    for key in p.keys():
      if key in rm:
        p.pop(key)
    if not 'opt' in p.keys():
      return p
    # sanitize optimization bounds, scale
    for var in p['opt']['vars']:
      # min
      if var+'_m' not in p.keys():
        p[var+'_m'] = -np.inf*np.ones_like(p[var])
      elif not np.asarray(p[var+'_m']).size == np.asarray(p[var]).size:
        p[var+'_m'] = p[var+'_m']*np.ones_like(p[var])
      # max
      if var+'_M' not in p.keys():
        p[var+'_M'] = np.inf*np.ones_like(p[var])
      elif not np.asarray(p[var+'_M']).size == np.asarray(p[var]).size:
        p[var+'_M'] = p[var+'_M']*np.ones_like(p[var])
      # scale
      if var+'_s' not in p.keys():
        # default to unity
        if np.any(np.isinf(np.r_[p[var+'_M'],p[var+'_m']])):
          p[var+'_s'] = 1.*np.ones_like(p[var]) 
        # default to .1*(max - min)
        else:
          p[var+'_s'] = .1*(p[var+'_M']-p[var+'_m'])
      elif not np.asarray(p[var+'_s']).size == np.asarray(p[var]).size:
        p[var+'_s'] = p[var+'_s']*np.ones_like(p[var])
    # sanitized parameters
    return p
  def pack(self, p=None):
    """
    pack(p)  updates optimization variables given parameter dict 
    Inputs:
      p - updated parameter dict
    Outputs:
      x - array - optimization variables
      n - int - x.size
      j - int array - optimization variable indices
      m - array - min, x > m
      M - array - max, x < M
      s - array - scale, x ~ s
    Effects:
      updates self.x,n,j,m,M,s
    """
    if p is None:
      p = self.p
    # unpack optimization params
    a = [np.asarray(p[var]) for var in p['opt']['vars']]
    x = np.hstack(a) # optimization variables
    n = x.size # number of optimization variables
    j = np.hstack( (0,np.cumsum([aa.size for aa in a])) ) # indices
    m = np.hstack([np.asarray(p[var+'_m']) for var in self.vars]) # min
    M = np.hstack([np.asarray(p[var+'_M']) for var in self.vars]) # max
    s = np.hstack([np.asarray(p[var+'_s']) for var in self.vars]) # scale
    self.x = x; self.n = n; self.j = j;
    self.m = m; self.M = M; self.s = s;
    return x,n,j,m,M,s
  def unpack(self, x):
    """
    unpack(x)  updates parameter dict given optimization variables
    Inputs:
      x - array - optimization variables
    Outputs:
      self.p - updated parameter dict
    Effects:
      updates self.p
    """
    q = []
    if np.asarray(x).shape == ():
      x = np.asarray([x])
    for i,var in enumerate(self.p['opt']['vars']):
      if self.j[i+1] == self.j[i]+1:
        q.append((var,x[self.j[i]]))
      else:
        q.append((var,x[self.j[i]:self.j[i+1]]))
    self.p.update(q)
    return self.p
  def cost( self,x,*args ):
    """
    fx = cost(x,*args)  cost function
    Inputs:
      x - array - optimization variables
      (optional)
      args - list - extra parameters
    Outputs:
      fx - scalar - cost function value
    """
    return np.nan
  def __repr__( self ):
    """
    __repr__()  string representation of optimization state
    """
    if hasattr(self,'cost') and hasattr(self,'x'):
      return 'cost(x) = {:0.2f}; x = {!s}'.format( self.cost(self.x), self.x )
    else:
      return 'p = {}'.format(self.p)
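# A minimal sketch (hypothetical parameter values) of how Opt.pars/pack flatten
# named parameters into one optimization vector, and how unpack maps a vector
# back onto the parameter dict:
def _opt_pack_demo():
  opt = Opt()
  opt.pars(x=np.array([1.,2.]), y=3., opt={'vars':['x','y']})
  # opt.x == [1., 2., 3.]; opt.j == [0, 2, 3] gives each variable's slice
  p = opt.unpack(opt.x + .1)
  return opt.x, p['x'], p['y'] # p['x'] == [1.1, 2.1], p['y'] == 3.1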
class SPOpt(Opt):
  """
  scipy.optimize interface class
  Usage:
    op = SPOpt('pars.py') # pars.py specifies optimization parameters
    op.solve()
  """
  dbg = True
  vars_tol = 1e-3
  cost_tol = 1e-3
  solver = None
  res = None
  ftol = 1e-3
  xtol = 1e-3
  def __init__( self,pth='',opt=None ):
    """
    SPOpt(pth)  creates optimization instance
    Inputs:
      pth - str - parameter .py file 
      opt - Opt - object with base class of Opt
    """
    Opt.__init__( self,pth )
    if opt is not None and isinstance(opt,Opt):
      self.pars(**opt.p)
    if 'opt' in self.p:
      self.vars_tol = self.p['opt'].get('vars_tol',self.vars_tol)
      self.cost_tol = self.p['opt'].get('cost_tol',self.cost_tol)
      self.solver   = self.p['opt'].get('solver',self.solver)
      self.ftol     = self.p['opt'].get('ftol',self.ftol)
      self.xtol     = self.p['opt'].get('xtol',self.xtol)
  def cost( self,x,*args ):
    """
    fx = cost(x,*args)  cost function
    Inputs:
      x - N-array - optimization variables
      (optional)
      args - list - extra parameters
    Outputs:
      fx - M-array - cost function values
    """
    return [np.nan]
  def solve( self ):
    """
    solve()  runs self.solver on optimization problem
    Effects:
      - assigns self.res
    """
    bounds = []
    for m,M in zip(self.m,self.M):
      bd = [m,M]
      if np.isinf(m):
        bd[0] = None
      if np.isinf(M):
        bd[1] = None
      bounds.append(bd)
    diag = self.s**-1
    if self.solver == 'leastsq':
      self.res = sp.optimize.leastsq(self.cost,self.x,full_output=True,
                                     xtol=self.xtol,ftol=self.ftol,
                                     diag=diag)
    #res = sp.optimize.fmin_l_bfgs_b(lambda opx : (np.sum(err(opx*op.s)[0]**2) / eta0.size),
    #                                opx1*op.s**-1,
    #                                pgtol=1e-3,bounds=bounds,
    #                                approx_grad=True,epsilon=1e-5)
    #res = sp.optimize.fmin_slsqp(lambda opx : (np.sum(err(opx*op.s)[0]**2) / eta0.size),
    #                             opx1*op.s**-1,
    #                             acc=1e-3,bounds=bounds,
    #                             epsilon=1e-5)
    return self.res
  def __repr__( self ):
    """
    __repr__()  string representation of optimization state
    """
    if hasattr(self,'cost') and hasattr(self,'x'):
      cost = np.asarray(self.cost(self.x)).flatten()
      cost = cost[0] if cost.size == 1 else np.sum(cost**2)
      return 'cost(x) = {:0.2f}; {!s}'.format( cost, self.pars() )
    else:
      return 'p = {}'.format(self.p)
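# A hypothetical pars.py (a sketch, not taken from any project) with the shape
# expected by Opt.pars/sanitize and consumed by SPOpt and NM: module-level
# values for each optimization variable, optional *_m/*_M bounds and *_s
# scales, and an `opt` dict naming the variables and solver settings.
#
#   x   = [0.5, 0.5]
#   x_m = [0., 0.]   # elementwise lower bound on x
#   x_M = [1., 1.]   # elementwise upper bound on x
#   opt = dict(vars=['x'],
#              cost=lambda x : (x[0]-1.)**2 + x[1]**2,
#              vars_tol=1e-4, cost_tol=1e-6)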
class NM(Opt):
  """
  Nelder-Mead optimization class
  @article{NelderMead1965,
           Author = {Nelder, J. A. and Mead, R.},
           Journal = {The Computer Journal},
           Number = {4},
           Pages = {308-313},
           Title = {A Simplex Method for Function Minimization},
           Volume = {7},
           Year = {1965}}
  Usage:
    nm = NM('pars.py') # pars.py specifies optimization parameters
    nm.init()
    while nm.step():
      print nm
  """
  dbg = True
  vol = 1e-10
  vars_tol = 1e-3
  cost_tol = 1e-3
  def __init__( self,pth='' ):
    """
    NM(pth)  creates optimization instance in directory / file given by pth
    Inputs:
      pth - str - parameter .py file or directory containing only one .py file
    """
    Opt.__init__( self,pth )
    if 'opt' in self.p and self.p['opt']:
      self.vars_tol = self.p['opt'].get('vars_tol',self.vars_tol)
      self.cost_tol = self.p['opt'].get('cost_tol',self.cost_tol)
      self.vol      = self.p['opt'].get('vol',self.vol)
  def init( self,x0=None,fx0=None ):
    """
    init(x0)  initializes Nelder-Mead optimization with simplex x0
    NOTE: if X.csv exists in self.di, simplex will be loaded
    Inputs:
      (optional)
      x0 - (n+1) x n - starting simplex
      fx0 - (n+1) x 1 - cost at starting simplex
    """
    self.k = 0
    x = self.x; s = self.s; p = self.p; n = self.n; di = self.di
    vars_tol = self.vars_tol; cost_tol = self.cost_tol
    if x0 is None:
      # initialize simplex
      x0 = np.vstack(( x, x + np.diag(s*(.5+np.random.rand(n))) ))
    assert x0.shape[1] == n # simplex consistent with parameter file ?
    assert x0.shape[0] >= n + 1 # full starting simplex ?
    if fx0 is not None:
      assert x0.shape[0] == fx0.shape[0] # cost evaluated on whole simplex ?
    X = []; F = []
    # load prior simplex
    pth = os.path.join(di,'X.csv')
    if os.path.exists(pth):
      if self.dbg:
        print 'loading {:s}'.format(pth)
      A = np.loadtxt(pth,delimiter=',')
      assert A.ndim == 2 and A.shape[1] == n # X.csv compatible with *.py ?
      X = [list(a) for a in A]
      assert len(X) >= n + 1 # X.csv contains full starting simplex ?
      x0 = X[-(n+1):]
      F = [np.nan for _ in range(len(X))]
      # load prior cost
      pth = os.path.join(di,'F.csv')
      if os.path.exists(pth):
        F = list(np.loadtxt(pth,delimiter=','))
        assert len(X) == len(F) # X.csv compatible with F.csv ?
      fx0 = F[-(n+1):]
      #m = min(len(X),n+1)
      #x0 = np.vstack(( X[-m:], x + s*np.random.rand(n+1-m,n) ))
      #f0 = np.hstack(( F[-m:], np.nan*np.ones(n+1-m) ))
    self.X = X; self.F = F
    #print np.asarray(x0)
    #print np.asarray(fx0)
    self.op = ou.fminIter(x0,f0=fx0,xtol=vars_tol,ftol=cost_tol)
    #1/0
    self.fx = self.op.next()
  def step( self ):
    """
    step()  executes one step of Nelder-Mead optimization
    Effects:
      - saves X.csv,F.csv
    """
    # extract next element from NM iterator
    try:
      self.x = self.op.next()
    except StopIteration:
      return False
    self.k += 1
    # keep params within bounds
    self.x_ = bd( self.x, self.m, self.M, dbg=self.dbg )
    # evaluate cost function; penalize for leaving specified bounds
    fx = np.asarray([self.cost(**self.pars(x=self.x_))]).flatten()
    assert fx.size == 1 # cost function returns scalar ?
    self.fx[-1] = fx[0]
    self.fx[-1] += np.exp(np.sum(np.abs(self.x_ - self.x) / self.s)) - 1.
    # store history
    self.X.append(list(self.x))
    self.F.append(self.fx[-1])
    # check that simplex has volume
    n = self.n
    if len(self.X) >= n:
      _,s,_ = np.linalg.svd(self.X[-n:])
      r = np.sum(s > self.vol)
      if r < n and self.dbg:
        print 'WARNING: simplex has rank %d < %d = dim x' % (r,n)
    return True
  def __repr__( self ):
    """
    __repr__()  string representation of optimization state
    """
    return 'k = {:4d}; f = {:0.2e}; p = {:s}'.format( 
            self.k, self.fx[-1], self.pars())
  def save( self ):
    """
    save()  save nm progress
    Effects:
      - saves X.csv and F.csv in same directory as self.fi
    """
    np.savetxt(os.path.join(self.di,'X.csv'),self.X,delimiter=',')
    np.savetxt(os.path.join(self.di,'F.csv'),self.F,delimiter=',')
if __name__ == "__main__":
  pth = 'pars.py'
  import sys
  if len(sys.argv) > 1:
    pth = sys.argv[1]
  # random initial simplex
  nm = NM(pth)
  nm.init()
  while nm.step() and nm.k <= 100:
    print nm
  nm.save()
  print np.asarray(nm.X[-(nm.n+1):])
  print np.asarray(nm.F[-(nm.n+1):])
  # load initial simplex
  nm = NM(pth)
  nm.vars_tol *= .01
  nm.init()
  print np.asarray(nm.X[-(nm.n+1):])
  print np.asarray(nm.F[-(nm.n+1):])
  while nm.step() and nm.k <= 100:
    print nm
from ast import literal_eval
from collections.abc import Mapping
from contextlib import contextmanager
from time import time
import random
import redis
from findig.context import ctx
from findig.resource import AbstractResource
from findig.tools.dataset import MutableDataSet, MutableRecord, FilteredDataSet
class IndexToken(Mapping):
    __slots__ = 'sz', 'fields'
    def __init__(self, fields, bytesize=4):
        self.fields = fields
        self.sz = bytesize
    def __str__(self):
        return ",".join("{}={!r}".format(k, self.fields[k])
                        for k in sorted(self.fields))
    def __hash__(self):
        return hash(str(self)) & (2**(8*self.sz - 1) - 1)
    def __iter__(self):
        yield from self.fields
    def __len__(self):
        return len(self.fields)
    def __getitem__(self, key):
        return self.fields[key]
    @property
    def value(self):
        return hash(self).to_bytes(self.sz, 'big')
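# A minimal sketch of how IndexToken condenses a field mapping into a
# fixed-width byte prefix: the fields are rendered in sorted order (so field
# order does not matter), hashed, masked to fit in `bytesize` bytes, and used
# by RedisSet as the prefix of its index members. Values are hypothetical.
def _index_token_demo():
    token = IndexToken({'id': 42}, bytesize=4)
    prefix = token.value        # 4 big-endian bytes of the masked hash
    member = prefix + b'42'     # shape of an entry in RedisSet's index zset
    return str(token), member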
class RedisObj(MutableRecord):
    def __init__(self, key, collection=None, include_id=True):
        self.itemkey = key
        self.collection = collection
        self.include_id = include_id
        self.r = (collection.r
                  if collection is not None
                  else redis.StrictRedis())
        self.inblock = False
    def __repr__(self):
        return "<{name}({key!r}){suffix}>".format(
            name="redis-object" if self.collection is None else "item",
            key=self.itemkey,
            suffix="" if self.collection is None
                   else " of {!r}".format(self.collection)
        )
    def start_edit_block(self):
        client = self.r
        self.r = self.r.pipeline()
        self.inblock = True
        return (client, dict(self))
    def close_edit_block(self, token):
        client, old_data = token
        ret = self.r.execute()
        self.r = client
        data = dict(self)
        if self.collection is not None:
            self.collection.reindex(
                self.id,
                data,
                old_data
            )
        self.invalidate(new_data=data)
        self.inblock = False
        return ret
    def patch(self, add_data, remove_fields, replace=False):
        p = self.r.pipeline()
        if not self.inblock:
            old_data = dict(self)
        if replace:
            p.delete(self.itemkey)
        elif remove_fields:
            p.hdel(self.itemkey, *remove_fields)
        self.store(add_data, self.itemkey, p)
        p.execute()
        if not self.inblock:
            # immediate write: update the cached data and reindex now
            # (edit blocks reindex in close_edit_block instead)
            data = {k: old_data[k] for k in old_data
                    if k not in remove_fields}
            data.update(add_data)
            self.invalidate(new_data=data)
            if self.collection is not None:
                self.collection.reindex(self.id, data, old_data)
        else:
            self.invalidate()
    def read(self):
        data = self.r.hgetall(self.itemkey)
        if self.include_id:
            data[b'id'] = self.id.encode("utf8")
        return {
            k.decode('utf8'): literal_eval(v.decode('utf8'))
            for k, v in data.items()
        }
    def delete(self):
        if self.collection is not None:
            self.collection.remove_from_index(self.id, self)
            self.collection.untrack_id(self.id)
        self.r.delete(self.itemkey)
    @staticmethod
    def store(data, key, client):
        data = {
            k: repr(v).encode('utf8')
            for k, v in data.items()
        }
        return client.hmset(key, data)
    @property
    def id(self):
        return self.itemkey.rpartition(":")[-1]
class RedisSet(MutableDataSet):
    """
    RedisSet(key=None, client=None, index_size=4)
    A RedisSet is an :class:`AbstractDataSet` that stores its items in
    a Redis database (using a sorted set to represent the collection,
    and a hash to represent each item).
    :param key: The base key that should be used for the sorted set. If
        not given, one is deterministically generated based on the current
        resource.
    :param client: A :class:`redis.StrictRedis` instance that should be
        used to communicate with the redis server. If not given, a default
        instance is used.
    :param index_size: The number of bytes to use to index items in the
        set (per item).
    """
    def __init__(self, key=None, client=None, **args):
        if key is None:
            key = ctx.resource
        if isinstance(key, AbstractResource):
            key = "findig:resource:{}".format(key.name)
        self.colkey = key
        self.itemkey = self.colkey + ':item:{id}'
        self.indkey = self.colkey + ':index'
        self.incrkey = self.colkey + ':next-id'
        self.genid = args.pop(
            'generate_id',
            lambda d: self.r.incr(self.incrkey)
        )
        self.indsize = args.pop('index_size', 4)
        self.filterby = args.pop('filterby', {})
        self.indexby = args.pop('candidate_keys', [('id',)])
        self.include_ids = args.pop('include_ids', True)
        self.r = redis.StrictRedis() if client is None else client
    def __repr__(self):
        if self.filterby:
            name = "filtered-redis-view"
            suffix = "|{}".format(
                ",".join(
                    "{}={!r}".format(k, v)
                    for k, v in self.filterby.items()
                )
            )
        else:
            name = "redis-set"
            suffix = ""
        return "<{name}({key!r}){suffix}>".format(
            name=name, suffix=suffix, key=self.colkey
        )
    def __iter__(self):
        """Query the set and iterate through the elements."""
        # If there is a filter, and it is completely encapsulated by
        # our index, we can use that to iter through the items
        tokens = self.__buildindextokens(self.filterby, raise_err=False)
        if tokens:
            # Pick an index to scan
            token = random.choice(tokens)
            id_blobs = self.r.zrangebylex(
                self.indkey, token.value, token.value)
            ids = [bs[self.indsize:] for bs in id_blobs]
        else:
            ids = self.r.zrange(self.colkey, 0, -1)
        for id in map(lambda bs: bs.decode('ascii'), ids):
            itemkey = self.itemkey.format(id=id)
            if self.filterby:
                # Check the items against the filter if it was
                # specified
                data = RedisObj(itemkey, self, self.include_ids)
                if FilteredDataSet.check_match(data, self.filterby):
                    yield data
            else:
                yield RedisObj(itemkey, self, self.include_ids)
    def add(self, data):
        id = str(data['id'] if 'id' in data else self.genid(data))
        itemkey = self.itemkey.format(id=id)
        with self.group_redis_commands():
            tokens = self.add_to_index(id, data)
            self.track_id(id)
            RedisObj.store(data, itemkey, self.r)
        return tokens[0]
    def fetch_now(self, **spec):
        if list(spec) == ['id']:
            # Fetching by ID only; just lookup the item according to its
            # key
            itemkey = self.itemkey.format(id=spec['id'])
            if not self.r.exists(itemkey):
                raise LookupError("No matching item found.")
            else:
                return RedisObj(itemkey, self)
        else:
            return super(RedisSet, self).fetch_now(**spec)
    def track_id(self, id):
        self.r.zadd(self.colkey, time(), id)
    def untrack_id(self, id):
        self.r.zrem(self.colkey, id)
    def remove_from_index(self, id, data):
        tokens = self.__buildindextokens(data, id, False)
        for token in tokens:
            self.r.zrem(
                self.indkey,
                token.value + id.encode('ascii')
            )
    def add_to_index(self, id, data):
        tokens = self.__buildindextokens(data, id)
        for token in tokens:
            self.r.zadd(
                self.indkey,
                0,
                token.value + id.encode('ascii')
            )
        return tokens
    def reindex(self, id, data, old_data):
        with self.group_redis_commands():
            self.remove_from_index(id, old_data)
            self.add_to_index(id, data)
    def clear(self):
        # Remove all the child objects
        for_removal = list(self)
        with self.group_redis_commands():
            for obj in for_removal:
                obj.delete()
            self.r.delete(self.incrkey)
        # No explicit delete of the collection/index sorted sets is needed;
        # Redis removes a sorted set automatically once all its members
        # are gone.
    def filtered(self, **spec):
        filter = dict(self.filterby)
        filter.update(spec)
        args = {
            'key': self.colkey,
            'candidate_keys': self.indexby,
            'index_size': self.indsize,
            'filterby': filter,
            'client': self.r,
        }
        return RedisSet(**args)
    @contextmanager
    def group_redis_commands(self):
        client = self.r
        self.r = client.pipeline()
        yield
        self.r.execute()
        self.r = client
    def __buildindextokens(self, data, generated_id=None, raise_err=True):
        index = []
        for ind in self.indexby:
            mapping = {}
            for field in ind:
                if field in data:
                    mapping[field] = data[field]
                elif field == 'id' and generated_id is not None:
                    # special case
                    mapping[field] = generated_id
                else:
                    # Can't use this index
                    break
            else:
                index.append(IndexToken(mapping, self.indsize))
        if not index:
            if raise_err:
                raise ValueError("Could not index this data. "
                                 "This may be due to insuffient index keys "
                                 "or incomplete data."
                                 )
            else:
                return []
        else:
            return index
__all__ = ["RedisSet"]
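# A minimal usage sketch (hypothetical key and fields; assumes a Redis server
# reachable with the default redis.StrictRedis() settings):
def _redis_set_demo():
    people = RedisSet(key="demo:people", candidate_keys=[('id',)])
    token = people.add({'name': 'ada', 'age': 36})  # returns an IndexToken
    record = people.fetch_now(id=token['id'])       # lookup by generated id
    return dict(record)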
# This file is part of QuTiP: Quantum Toolbox in Python.
#
#    Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
#    All rights reserved.
#
#    Redistribution and use in source and binary forms, with or without
#    modification, are permitted provided that the following conditions are
#    met:
#
#    1. Redistributions of source code must retain the above copyright notice,
#       this list of conditions and the following disclaimer.
#
#    2. Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#
#    3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
#       of its contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
#    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
#    PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#    HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
import warnings
from qutip import tensor, identity, destroy, sigmax, sigmaz, basis
from qutip.qip.circuit import QubitCircuit, Gate
from qutip.qip.models.circuitprocessor import CircuitProcessor
class DispersivecQED(CircuitProcessor):
    """
    Representation of the physical implementation of a quantum
    program/algorithm on a dispersive cavity-QED system.
    """
    def __init__(self, N, correct_global_phase=True, Nres=None, deltamax=None,
                 epsmax=None, w0=None, wq=None, eps=None, delta=None, g=None):
        """
        Parameters
        ----------
        N: Integer
            The number of qubits in the system.
        correct_global_phase: Boolean
            Whether the global phase accumulated while resolving gates
            should be tracked and corrected.
        Nres: Integer
            The number of energy levels in the resonator.
        deltamax: Integer/List
            The sigma-x coefficient for each of the qubits in the system.
        epsmax: Integer/List
            The sigma-z coefficient for each of the qubits in the system.
        w0: Integer
            The base frequency of the resonator.
        wq: Integer/List
            The frequency of the qubits.
        eps: Integer/List
            The epsilon for each of the qubits in the system.
        delta: Integer/List
            The delta for each of the qubits in the system.
        g: Integer/List
            The interaction strength of each qubit with the resonator.
        """
        super(DispersivecQED, self).__init__(N, correct_global_phase)
        # user definable
        if Nres is None:
            self.Nres = 10
        else:
            self.Nres = Nres
        if deltamax is None:
            self.sx_coeff = np.array([1.0 * 2 * np.pi] * N)
        elif not isinstance(deltamax, list):
            self.sx_coeff = np.array([deltamax * 2 * np.pi] * N)
        else:
            self.sx_coeff = np.array(deltamax)
        if epsmax is None:
            self.sz_coeff = np.array([9.5 * 2 * np.pi] * N)
        elif not isinstance(epsmax, list):
            self.sz_coeff = np.array([epsmax * 2 * np.pi] * N)
        else:
            self.sz_coeff = np.array(epsmax)
        if w0 is None:
            self.w0 = 10 * 2 * np.pi
        else:
            self.w0 = w0
        if eps is None:
            self.eps = np.array([9.5 * 2 * np.pi] * N)
        elif not isinstance(eps, list):
            self.eps = np.array([eps * 2 * np.pi] * N)
        else:
            self.eps = np.array(eps)
        if delta is None:
            self.delta = np.array([0.0 * 2 * np.pi] * N)
        elif not isinstance(delta, list):
            self.delta = np.array([delta * 2 * np.pi] * N)
        else:
            self.delta = np.array(delta)
        if g is None:
            self.g = np.array([0.01 * 2 * np.pi] * N)
        elif not isinstance(g, list):
            self.g = np.array([g * 2 * np.pi] * N)
        else:
            self.g = np.array(g)
        if wq is not None:
            if not isinstance(wq, list):
                self.wq = np.array([wq] * N)
            else:
                self.wq = np.array(wq)
        if wq is None:
            if eps is None:
                self.eps = np.array([9.5 * 2 * np.pi] * N)
            elif not isinstance(eps, list):
                self.eps = np.array([eps] * N)
            else:
                self.eps = np.array(eps)
            if delta is None:
                self.delta = np.array([0.0 * 2 * np.pi] * N)
            elif not isinstance(delta, list):
                self.delta = np.array([delta] * N)
            else:
                self.delta = np.array(delta)
        # computed
        self.wq = np.sqrt(self.eps ** 2 + self.delta ** 2)
        self.Delta = self.wq - self.w0
        # rwa/dispersive regime tests
        if any(self.g / (self.w0 - self.wq) > 0.05):
            warnings.warn("Not in the dispersive regime")
        if any((self.w0 - self.wq) / (self.w0 + self.wq) > 0.05):
            warnings.warn(
                "The rotating-wave approximation might not be valid.")
        self.sx_ops = [tensor([identity(self.Nres)] +
                              [sigmax() if m == n else identity(2)
                               for n in range(N)])
                       for m in range(N)]
        self.sz_ops = [tensor([identity(self.Nres)] +
                              [sigmaz() if m == n else identity(2)
                               for n in range(N)])
                       for m in range(N)]
        self.a = tensor([destroy(self.Nres)] + [identity(2) for n in range(N)])
        self.cavityqubit_ops = []
        for n in range(N):
            sm = tensor([identity(self.Nres)] +
                        [destroy(2) if m == n else identity(2)
                         for m in range(N)])
            self.cavityqubit_ops.append(self.a.dag() * sm + self.a * sm.dag())
        self.psi_proj = tensor([basis(self.Nres, 0)] +
                               [identity(2) for n in range(N)])
    def get_ops_and_u(self):
        H0 = self.a.dag() * self.a
        return ([H0] + self.sx_ops + self.sz_ops + self.cavityqubit_ops,
                np.hstack((self.w0 * np.zeros((self.sx_u.shape[0], 1)),
                          self.sx_u, self.sz_u, self.g_u)))
    def get_ops_labels(self):
        return ([r"$a^\dagger a$"] +
                [r"$\sigma_x^%d$" % n for n in range(self.N)] +
                [r"$\sigma_z^%d$" % n for n in range(self.N)] +
                [r"$g_{%d}$" % (n) for n in range(self.N)])
    def optimize_circuit(self, qc):
        self.qc0 = qc
        self.qc1 = self.qc0.resolve_gates(basis=["ISWAP", "RX", "RZ"])
        self.qc2 = self.dispersive_gate_correction(self.qc1)
        return self.qc2
    def eliminate_auxillary_modes(self, U):
        return self.psi_proj.dag() * U * self.psi_proj
    def dispersive_gate_correction(self, qc1, rwa=True):
        """
        Method to resolve ISWAP and SQRTISWAP gates in a cQED system by adding
        single qubit gates to get the correct output matrix.
        Parameters
        ----------
        qc1: QubitCircuit
            The circuit whose ISWAP/SQRTISWAP gates need phase correction.
        rwa: Boolean
            Specify if RWA is used or not.
        Returns
        ----------
        qc: QubitCircuit
            Returns QubitCircuit of resolved gates for the qubit circuit in the
            desired basis.
        """
        qc = QubitCircuit(qc1.N, qc1.reverse_states)
        for gate in qc1.gates:
            qc.gates.append(gate)
            if rwa:
                if gate.name == "SQRTISWAP":
                    qc.gates.append(Gate("RZ", [gate.targets[0]], None,
                                         arg_value=-np.pi / 4,
                                         arg_label=r"-\pi/4"))
                    qc.gates.append(Gate("RZ", [gate.targets[1]], None,
                                         arg_value=-np.pi / 4,
                                         arg_label=r"-\pi/4"))
                    qc.gates.append(Gate("GLOBALPHASE", None, None,
                                         arg_value=-np.pi / 4,
                                         arg_label=r"-\pi/4"))
                elif gate.name == "ISWAP":
                    qc.gates.append(Gate("RZ", [gate.targets[0]], None,
                                         arg_value=-np.pi / 2,
                                         arg_label=r"-\pi/2"))
                    qc.gates.append(Gate("RZ", [gate.targets[1]], None,
                                         arg_value=-np.pi / 2,
                                         arg_label=r"-\pi/2"))
                    qc.gates.append(Gate("GLOBALPHASE", None, None,
                                         arg_value=-np.pi / 2,
                                         arg_label=r"-\pi/2"))
        return qc
    def load_circuit(self, qc):
        gates = self.optimize_circuit(qc).gates
        self.global_phase = 0
        self.sx_u = np.zeros((len(gates), len(self.sx_ops)))
        self.sz_u = np.zeros((len(gates), len(self.sz_ops)))
        self.g_u = np.zeros((len(gates), len(self.cavityqubit_ops)))
        self.T_list = []
        n = 0
        for gate in gates:
            if gate.name == "ISWAP":
                t0, t1 = gate.targets[0], gate.targets[1]
                self.sz_u[n, t0] = self.wq[t0] - self.w0
                self.sz_u[n, t1] = self.wq[t1] - self.w0
                self.g_u[n, t0] = self.g[t0]
                self.g_u[n, t1] = self.g[t1]
                J = self.g[t0] * self.g[t1] * (1 / self.Delta[t0] +
                                               1 / self.Delta[t1]) / 2
                T = (4 * np.pi / abs(J)) / 4
                self.T_list.append(T)
                n += 1
            elif gate.name == "SQRTISWAP":
                t0, t1 = gate.targets[0], gate.targets[1]
                self.sz_u[n, t0] = self.wq[t0] - self.w0
                self.sz_u[n, t1] = self.wq[t1] - self.w0
                self.g_u[n, t0] = self.g[t0]
                self.g_u[n, t1] = self.g[t1]
                J = self.g[t0] * self.g[t1] * (1 / self.Delta[t0] +
                                               1 / self.Delta[t1]) / 2
                T = (4 * np.pi / abs(J)) / 8
                self.T_list.append(T)
                n += 1
            elif gate.name == "RZ":
                g = self.sz_coeff[gate.targets[0]]
                self.sz_u[n, gate.targets[0]] = np.sign(gate.arg_value) * g
                T = abs(gate.arg_value) / (2 * g)
                self.T_list.append(T)
                n += 1
            elif gate.name == "RX":
                g = self.sx_coeff[gate.targets[0]]
                self.sx_u[n, gate.targets[0]] = np.sign(gate.arg_value) * g
                T = abs(gate.arg_value) / (2 * g)
                self.T_list.append(T)
                n += 1
            elif gate.name == "GLOBALPHASE":
                self.global_phase += gate.arg_value
            else:
                raise ValueError("Unsupported gate %s" % gate.name)
# Copyright (c) 2015 OpenStack Foundation
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
UUID = "3fc2ba62-9a02-433e-b565-d493ffc69034"
image_list_fixture = {
    "images": [
        {
            "checksum": "9cb02fe7fcac26f8a25d6db3109063ae",
            "container_format": "bare",
            "created_at": "2015-07-23T16:58:50.000000",
            "deleted": "false",
            "deleted_at": "null",
            "disk_format": "raw",
            "id": UUID,
            "is_public": "false",
            "min_disk": 0,
            "min_ram": 0,
            "name": "test",
            "owner": "3447cea05d6947658d73791ed9e0ed9f",
            "properties": {
                "kernel_id": 1234,
                "ramdisk_id": 5678
            },
            "protected": "false",
            "size": 145,
            "status": "active",
            "updated_at": "2015-07-23T16:58:51.000000",
            "virtual_size": "null"
        }
    ]
}
image_show_fixture = {
    "checksum": "9cb02fe7fcac26f8a25d6db3109063ae",
    "container_format": "bare",
    "created_at": "2015-07-24T12:18:13Z",
    "disk_format": "raw",
    "file": "/v2/images/%s/file" % UUID,
    "id": UUID,
    "kernel_id": "1234",
    "min_disk": 0,
    "min_ram": 0,
    "name": "img1",
    "owner": "411423405e10431fb9c47ac5b2446557",
    "protected": "false",
    "ramdisk_id": "5678",
    "schema": "/v2/schemas/image",
    "self": "/v2/images/%s" % UUID,
    "size": 145,
    "status": "active",
    "tags": [],
    "updated_at": "2015-07-24T12:18:13Z",
    "virtual_size": "null",
    "visibility": "private"
}
schema_fixture = {
    "additionalProperties": {
        "type": "string"
    },
    "links": [
        {
            "href": "{self}",
            "rel": "self"
        },
        {
            "href": "{file}",
            "rel": "enclosure"
        },
        {
            "href": "{schema}",
            "rel": "describedby"
        }
    ],
    "name": "image",
    "properties": {
        "architecture": {
            "description": "Operating system architecture as specified in "
                           "http://docs.openstack.org/trunk/openstack-compute"
                           "/admin/content/adding-images.html",
            "is_base": "false",
            "type": "string"
        },
        "checksum": {
            "description": "md5 hash of image contents. (READ-ONLY)",
            "maxLength": 32,
            "type": [
                "null",
                "string"
            ]
        },
        "container_format": {
            "description": "Format of the container",
            "enum": [
                "null",
                "ami",
                "ari",
                "aki",
                "bare",
                "ovf",
                "ova"
            ],
            "type": [
                "null",
                "string"
            ]
        },
        "created_at": {
            "description": "Date and time of image registration (READ-ONLY)",
            "type": "string"
        },
        "direct_url": {
            "description": "URL to access the image file kept in external "
                           "store (READ-ONLY)",
            "type": "string"
        },
        "disk_format": {
            "description": "Format of the disk",
            "enum": [
                "null",
                "ami",
                "ari",
                "aki",
                "vhd",
                "vmdk",
                "raw",
                "qcow2",
                "vdi",
                "iso"
            ],
            "type": [
                "null",
                "string"
            ]
        },
        "file": {
            "description": "(READ-ONLY)",
            "type": "string"
        },
        "id": {
            "description": "An identifier for the image",
            "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F])"
                       "{4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
            "type": "string"
        },
        "instance_uuid": {
            "description": "ID of instance used to create this image.",
            "is_base": "false",
            "type": "string"
        },
        "kernel_id": {
            "description": "ID of image stored in Glance that should be used "
                           "as the kernel when booting an AMI-style image.",
            "is_base": "false",
            "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F])"
                       "{4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
            "type": [
                "null",
                "string"
            ]
        },
        "locations": {
            "description": "A set of URLs to access the image file kept "
                           "in external store",
            "items": {
                "properties": {
                    "metadata": {
                        "type": "object"
                    },
                    "url": {
                        "maxLength": 255,
                        "type": "string"
                    }
                },
                "required": [
                    "url",
                    "metadata"
                ],
                "type": "object"
            },
            "type": "array"
        },
        "min_disk": {
            "description": "Amount of disk space (in GB) required to "
                           "boot image.",
            "type": "integer"
        },
        "min_ram": {
            "description": "Amount of ram (in MB) required to boot image.",
            "type": "integer"
        },
        "name": {
            "description": "Descriptive name for the image",
            "maxLength": 255,
            "type": [
                "null",
                "string"
            ]
        },
        "os_distro": {
            "description": "Common name of operating system distribution as "
                           "specified in http://docs.openstack.org/trunk/"
                           "openstack-compute/admin/content/"
                           "adding-images.html",
            "is_base": "false",
            "type": "string"
        },
        "os_version": {
            "description": "Operating system version as specified "
                           "by the distributor",
            "is_base": "false",
            "type": "string"
        },
        "owner": {
            "description": "Owner of the image",
            "maxLength": 255,
            "type": [
                "null",
                "string"
            ]
        },
        "protected": {
            "description": "If true, image will not be deletable.",
            "type": "boolean"
        },
        "ramdisk_id": {
            "description": "ID of image stored in Glance that should be used "
                           "as the ramdisk when booting an AMI-style image.",
            "is_base": "false",
            "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F])"
                       "{4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
            "type": [
                "null",
                "string"
            ]
        },
        "schema": {
            "description": "(READ-ONLY)",
            "type": "string"
        },
        "self": {
            "description": "(READ-ONLY)",
            "type": "string"
        },
        "size": {
            "description": "Size of image file in bytes (READ-ONLY)",
            "type": [
                "null",
                "integer"
            ]
        },
        "status": {
            "description": "Status of the image (READ-ONLY)",
            "enum": [
                "queued",
                "saving",
                "active",
                "killed",
                "deleted",
                "pending_delete"
            ],
            "type": "string"
        },
        "tags": {
            "description": "List of strings related to the image",
            "items": {
                "maxLength": 255,
                "type": "string"
            },
            "type": "array"
        },
        "updated_at": {
            "description": "Date and time of the last image "
                           "modification (READ-ONLY)",
            "type": "string"
        },
        "virtual_size": {
            "description": "Virtual size of image in bytes (READ-ONLY)",
            "type": [
                "null",
                "integer"
            ]
        },
        "visibility": {
            "description": "Scope of image accessibility",
            "enum": [
                "public",
                "private"
            ],
            "type": "string"
        }
    }
}
#
# WCSMatch.py -- WCSMatch plugin for Ginga reference viewer
#
# Eric Jeschke ([email protected])
#
# Copyright (c)  Eric R. Jeschke.  All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga import GingaPlugin
from ginga.gw import Widgets
from ginga.util import wcs
from ginga.misc import Bunch
class WCSMatch(GingaPlugin.GlobalPlugin):
    """
    *** This plugin is experimental/alpha/testing/preview ***
    WCSMatch is a global plugin for the Ginga image viewer that allows
    you to roughly align images with different scales and orientations
    using WCS for viewing purposes.
    To use, simply start the plugin, and from the plugin GUI select a
    channel from the drop-down menu labeled "Reference Channel".  The
    image contained in that channel will be used as a reference for
    zooming and orienting the images in the other channels.
    The channels will be synchronized in viewing (zoom, pan, rotate,
    transform).  To "unlock" the synchronization, simply select "None"
    from the "Reference Channel" drop-down menu.
    Currently there is no way to limit the channels that are affected
    by the plugin.
    """
    def __init__(self, fv):
        # superclass defines some variables for us, like logger
        super(WCSMatch, self).__init__(fv)
        self.channel = {}
        self.chnames = []
        self.ref_channel = None
        self.ref_image = None
        self.gui_up = False
        self._cur_opn_fitsimage = None
        fv.add_callback('add-channel', self.add_channel)
        fv.add_callback('delete-channel', self.delete_channel)
    def build_gui(self, container):
        top = Widgets.VBox()
        top.set_border_width(4)
        vbox, sw, orientation = Widgets.get_oriented_box(container)
        vbox.set_border_width(4)
        vbox.set_spacing(2)
        self.msgFont = self.fv.getFont("sansFont", 12)
        tw = Widgets.TextArea(wrap=True, editable=False)
        tw.set_font(self.msgFont)
        self.tw = tw
        fr = Widgets.Expander("Instructions")
        fr.set_widget(tw)
        vbox.add_widget(fr, stretch=0)
        fr = Widgets.Frame("WCS Match")
        captions = ((' Reference Channel:', 'label',
                     'ref channel', 'combobox'),
                    )
        w, b = Widgets.build_info(captions, orientation=orientation)
        self.w = b
        b.ref_channel.add_callback('activated', self.set_reference_channel_cb)
        fr.set_widget(w)
        vbox.add_widget(fr, stretch=0)
        spacer = Widgets.Label('')
        vbox.add_widget(spacer, stretch=1)
        top.add_widget(sw, stretch=1)
        btns = Widgets.HBox()
        btns.set_spacing(3)
        btn = Widgets.Button("Close")
        btn.add_callback('activated', lambda w: self.close())
        btns.add_widget(btn, stretch=0)
        btns.add_widget(Widgets.Label(''), stretch=1)
        top.add_widget(btns, stretch=0)
        container.add_widget(top, stretch=1)
        self.gui_up = True
        self._reset_channels_gui()
    def add_channel(self, viewer, channel):
        chname = channel.name
        info = Bunch.Bunch(chinfo=channel)
        self.channel[chname] = info
        # Add callbacks to the viewer for all the scale, pan, rotation and
        # transform settings changes
        fitsimage = channel.fitsimage
        fitssettings = fitsimage.get_settings()
        fitsimage.add_callback('image-set', self.new_image_cb, info)
        fitssettings.getSetting('scale').add_callback('set',
                                                   self.zoomset_cb, fitsimage, info)
        fitssettings.getSetting('rot_deg').add_callback('set',
                                                        self.rotset_cb, fitsimage, info)
        for name in ('flip_x', 'flip_y', 'swap_xy'):
            fitssettings.getSetting(name).add_callback('set',
                                                       self.xfmset_cb, fitsimage, info)
        fitssettings.getSetting('pan').add_callback('set',
                                                    self.panset_cb, fitsimage, info)
        self.fv.gui_do(self._reset_channels_gui)
    def delete_channel(self, viewer, channel):
        chname = channel.name
        self.logger.debug("deleting channel %s" % (chname))
        self.active = None
        self.info = None
        del self.channel[chname]
        self.fv.gui_do(self._reset_channels_gui)
    def _reset_channels_gui(self):
        self.chnames = list(self.fv.get_channelNames())
        self.chnames.sort()
        self.chnames.insert(0, "None")
        if not self.gui_up:
            return
        self.w.ref_channel.clear()
        for chname in self.chnames:
            self.w.ref_channel.append_text(chname)
    # CALLBACKS
    def new_image_cb(self, fitsimage, image, info):
        # add cb to image so that if it is modified we can update info
        #image.add_callback('modified', self.image_update_cb, fitsimage, info)
        #self.set_info(info, fitsimage)
        self.logger.info("Channel '%s' setting image" % (info.chinfo.name))
        if info.chinfo == self.ref_channel:
            self.ref_image = image
        return True
    def set_reference_channel_cb(self, w, idx):
        chname = self.chnames[idx]
        if chname == 'None':
            self.ref_image = None
            self.ref_channel = None
            return
        chinfo = self.fv.get_channelInfo(chname)
        self.ref_channel = chinfo
        fitsimage = chinfo.fitsimage
        self.ref_image = fitsimage.get_image()
        # reset the scale base to be identical in both axes for the
        # reference image
        fitsimage.set_scale_base_xy(1.0, 1.0)
        self.scale_all_relative(fitsimage, chinfo)
        self.rotate_all_relative(fitsimage, chinfo)
        self.transform_all_relative(fitsimage, chinfo)
        self.pan_all_relative(fitsimage, chinfo)
        self.logger.info("set reference channel to '%s'" % (chname))
    def close(self):
        self.fv.stop_global_plugin(str(self))
        return True
    def instructions(self):
        self.tw.set_text(WCSMatch.__doc__)
    def start(self):
        self.instructions()
    def stop(self):
        self.ref_channel = None
        self.ref_image = None
        self.fv.showStatus("")
    def get_other_channels(self, myname):
        return set(self.fv.get_channelNames()) - set([myname])
    def zoomset_cb(self, setting, value, fitsimage, info):
        """This callback is called when a channel window is zoomed.
        """
        # Don't do anything if we are not active
        if not self.gui_up or self.ref_image is None:
            return
        # if this is not a zoom event from the focus window then
        # don't do anything
        ## focus_fitsimage = self.fv.getfocus_fitsimage()
        ## if fitsimage != focus_fitsimage:
        ##     return
        if self._cur_opn_fitsimage is not None:
            return
        self._cur_opn_fitsimage = fitsimage
        try:
            self.scale_all_relative(fitsimage, info.chinfo)
        finally:
            self._cur_opn_fitsimage = None
    def scale_all_relative(self, fitsimage, chinfo):
        if self.ref_image is None:
            return
        # get native scale relative to reference image
        image = fitsimage.get_image()
        ort = wcs.get_relative_orientation(image, self.ref_image)
        self.logger.info("scale for channel '%s' relative to ref image %f,%f" % (
            chinfo.name, ort.rscale_x, ort.rscale_y))
        scale_x, scale_y = fitsimage.get_scale_xy()
        #scale_x, scale_y = value
        chg_x, chg_y = scale_x / ort.rscale_x, scale_y / ort.rscale_y
        self.logger.info("scale changed for channel '%s' by %f,%f" % (
            chinfo.name, chg_x, chg_y))
        # for all other channels except ours
        chnames = self.get_other_channels(chinfo.name)
        for chname in chnames:
            chinfo2 = self.fv.get_channelInfo(chname)
            # calculate scale from orientation to reference image
            image = chinfo2.fitsimage.get_image()
            if image is None:
                continue
            ort = wcs.get_relative_orientation(image, self.ref_image)
            new_scale_x, new_scale_y = (ort.rscale_x * chg_x,
                                        ort.rscale_y * chg_y)
            # apply that scale
            self.logger.info("changing scale for channel '%s' to %f,%f" % (
                chinfo2.name, new_scale_x, new_scale_y))
            chinfo2.fitsimage.scale_to(new_scale_x, new_scale_y)
    def rotset_cb(self, setting, value, fitsimage, info):
        """This callback is called when a channel window is rotated.
        """
        # Don't do anything if we are not active
        if not self.gui_up or self.ref_image is None:
            return
        # if this is not a zoom event from the focus window then
        # don't do anything
        ## focus_fitsimage = self.fv.getfocus_fitsimage()
        ## if fitsimage != focus_fitsimage:
        ##     return
        if self._cur_opn_fitsimage is not None:
            return
        self._cur_opn_fitsimage = fitsimage
        try:
            self.rotate_all_relative(fitsimage, info.chinfo)
        finally:
            self._cur_opn_fitsimage = None
    def rotate_all_relative(self, fitsimage, chinfo):
        if self.ref_image is None:
            return
        # get native scale relative to reference image
        image = fitsimage.get_image()
        if self.ref_image is None:
            return
        ort = wcs.get_relative_orientation(image, self.ref_image)
        self.logger.info("rotation for channel '%s' relative to ref image %f" % (
            chinfo.name, ort.rrot_deg))
        rot_deg = fitsimage.get_rotation()
        chg_rot_deg = rot_deg + ort.rrot_deg
        self.logger.info("rotation changed for channel '%s' by %f" % (
            chinfo.name, chg_rot_deg))
        # for all other channels except ours
        chnames = self.get_other_channels(chinfo.name)
        for chname in chnames:
            chinfo2 = self.fv.get_channelInfo(chname)
            # Get relative rotation of their image
            image = chinfo2.fitsimage.get_image()
            if image is None:
                continue
            ort = wcs.get_relative_orientation(image, self.ref_image)
            # Apply that rotation
            new_rot_deg = ort.rrot_deg + chg_rot_deg
            self.logger.info("changing rot for channel '%s' to %f" % (
                chinfo2.name, new_rot_deg))
            chinfo2.fitsimage.rotate(new_rot_deg)
    def panset_cb(self, setting, value, fitsimage, info):
        """This callback is called when a channel window is panned.
        """
        # Don't do anything if we are not active
        if not self.gui_up or self.ref_image is None:
            return
        # if this is not a zoom event from the focus window then
        # don't do anything
        ## focus_fitsimage = self.fv.getfocus_fitsimage()
        ## if fitsimage != focus_fitsimage:
        ##     return
        if self._cur_opn_fitsimage is not None:
            return
        self._cur_opn_fitsimage = fitsimage
        try:
            self.pan_all_relative(fitsimage, info.chinfo)
        finally:
            self._cur_opn_fitsimage = None
    def pan_all_relative(self, fitsimage, chinfo):
        if self.ref_image is None:
            return
        image = fitsimage.get_image()
        if self.ref_image is None:
            return
        pan_ra, pan_dec = fitsimage.get_pan(coord='wcs')
        # for all other channels except ours
        chnames = self.get_other_channels(chinfo.name)
        for chname in chnames:
            chinfo2 = self.fv.get_channelInfo(chname)
            # set pan position on their viewer
            image = chinfo2.fitsimage.get_image()
            if image is None:
                continue
            data_x, data_y = image.radectopix(pan_ra, pan_dec)
            chinfo2.fitsimage.panset_xy(data_x, data_y)
    def xfmset_cb(self, setting, value, fitsimage, info):
        """This callback is called when a channel window is transformed
        (flipped, or swap axes).
        """
        # Don't do anything if we are not active
        if not self.gui_up or self.ref_image is None:
            return
        # if this is not a zoom event from the focus window then
        # don't do anything
        ## focus_fitsimage = self.fv.getfocus_fitsimage()
        ## if fitsimage != focus_fitsimage:
        ##     return
        if self._cur_opn_fitsimage is not None:
            return
        self._cur_opn_fitsimage = fitsimage
        try:
            self.transform_all_relative(fitsimage, info.chinfo)
        finally:
            self._cur_opn_fitsimage = None
    def transform_all_relative(self, fitsimage, chinfo):
        if self.ref_image is None:
            return
        image = fitsimage.get_image()
        if self.ref_image is None:
            return
        flip_x, flip_y, swap_xy = fitsimage.get_transforms()
        # for all other channels except ours
        chnames = self.get_other_channels(chinfo.name)
        for chname in chnames:
            chinfo2 = self.fv.get_channelInfo(chname)
            # set our pan position on their viewer
            image = chinfo2.fitsimage.get_image()
            if image is None:
                continue
            chinfo2.fitsimage.transform(flip_x, flip_y, swap_xy)
    def redo(self):
        if self.ref_image is None:
            # no reference image
            return
        chinfo = self.fv.get_channelInfo(chname)
        viewer = chinfo.fitsimage
        image = viewer.get_image()
        ## if image == self.ref_image:
        ##     # current image is same as reference image
        ##     return
        info = wcs.get_relative_orientation(image, self.ref_image)
        self.logger.info("rscale_x=%f rscale_y=%f rrot_deg=%f" % (
            info.rscale_x, info.rscale_y, info.rrot_deg))
    def __str__(self):
        return 'wcsmatch'
#END
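# A small numeric sketch (hypothetical numbers) of the relative-scale rule in
# scale_all_relative(): the zoom change in the active channel is divided by
# that channel's scale relative to the reference image, then re-applied through
# each other channel's own relative scale.
def _relative_scale_demo():
    rscale_active = 0.5                 # active channel relative to reference
    scale_active = 1.2                  # user zoomed the active channel to 1.2
    chg = scale_active / rscale_active  # change expressed in reference terms
    rscale_other = 2.0                  # another channel relative to reference
    return rscale_other * chg           # scale pushed onto the other channel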
#    Copyright 2015 Mirantis, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
import dictdiffer
import networkx as nx
from solar.core.log import log
from solar.core import resource
from solar.core.resource.resource import RESOURCE_STATE
from solar.core import signals
from solar.dblayer.solar_models import CommitedResource
from solar.dblayer.solar_models import LogItem
from solar.dblayer.solar_models import StrInt
from solar.events import api as evapi
from solar.events.controls import StateChange
from solar.orchestration import graph
from solar.system_log import data
from solar import utils
from solar.system_log.consts import CHANGES
def guess_action(from_, to):
    # NOTE(dshulyak) imo the way to solve this is a dsl for orchestration,
    # something where this action will be explicitly specified
    if not from_:
        return CHANGES.run.name
    elif not to:
        return CHANGES.remove.name
    else:
        return CHANGES.update.name
def create_diff(staged, commited):
    def listify(t):
        # we need all values as lists, because we need the same behaviour
        # in pre and post save situations
        return list(map(listify, t)) if isinstance(t, (list, tuple)) else t
    res = tuple(dictdiffer.diff(commited, staged))
    return listify(res)
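# A tiny sketch of the diff format produced by create_diff (dictdiffer entries
# with tuples normalized to lists by listify); the values are hypothetical:
def _create_diff_demo():
    commited = {'port': 8080}
    staged = {'port': 9090, 'host': 'h1'}
    return create_diff(staged, commited)
    # -> [['change', 'port', [8080, 9090]], ['add', '', [['host', 'h1']]]]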
def create_logitem(resource, action, diffed, connections_diffed,
                   base_path=''):
    return LogItem.new(
        {'resource': resource,
         'action': action,
         'diff': diffed,
         'connections_diff': connections_diffed,
         'base_path': base_path,
         'log': 'staged'})
def create_sorted_diff(staged, commited):
    staged.sort()
    commited.sort()
    return create_diff(staged, commited)
def make_single_stage_item(resource_obj):
    commited = resource_obj.load_commited()
    base_path = resource_obj.base_path
    if resource_obj.to_be_removed():
        resource_args = {}
        resource_connections = []
    else:
        resource_args = resource_obj.args
        resource_connections = resource_obj.connections
    if commited.state == RESOURCE_STATE.removed.name:
        commited_args = {}
        commited_connections = []
    else:
        commited_args = commited.inputs
        commited_connections = commited.connections
    inputs_diff = create_diff(resource_args, commited_args)
    connections_diff = create_sorted_diff(
        resource_connections, commited_connections)
    # if a new connection is created it will be reflected in inputs,
    # but inputs alone cannot be used to reverse connections
    if inputs_diff:
        li = create_logitem(
            resource_obj.name,
            guess_action(commited_args, resource_args),
            inputs_diff,
            connections_diff,
            base_path=base_path)
        li.save()
        return li
    return None
def stage_changes():
    for li in data.SL():
        li.delete()
    last = LogItem.history_last()
    since = StrInt.greater(last.updated) if last else None
    staged_log = utils.solar_map(make_single_stage_item,
                                 resource.load_updated(since), concurrency=10)
    staged_log = filter(None, staged_log)
    return staged_log
def send_to_orchestration():
    dg = nx.MultiDiGraph()
    events = {}
    changed_nodes = []
    for logitem in data.SL():
        events[logitem.resource] = evapi.all_events(logitem.resource)
        changed_nodes.append(logitem.resource)
        state_change = StateChange(logitem.resource, logitem.action)
        state_change.insert(changed_nodes, dg)
    evapi.build_edges(dg, events)
    # what `name` should be?
    dg.graph['name'] = 'system_log'
    return graph.create_plan_from_graph(dg)
def parameters(res, action, data):
    return {'args': [res, action],
            'type': 'solar_resource'}
def _get_args_to_update(args, connections):
    """Returns args to update
    For each resource we can update only args that are not provided
    by connections
    """
    inherited = [i[3].split(':')[0] for i in connections]
    return {
        key: args[key] for key in args
        if key not in inherited
    }
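# Worked example with hypothetical data: each connection is a 4-item sequence
# (emitter, emitter_input, receiver, receiver_input), e.g.
#   connections = [['node1', 'ip', 'node2', 'ip'],
#                  ['keys1', 'key', 'node2', 'ssh_key:str']]
#   args = {'ip': '10.0.0.1', 'ssh_key': 'AAAA', 'login_user': 'root'}
# 'ip' and 'ssh_key' are provided by connections, so only
# {'login_user': 'root'} is returned for update.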
def revert_uids(uids):
    """Reverts uids
    :param uids: iterable not generator
    """
    items = LogItem.multi_get(uids)
    for item in items:
        if item.action == CHANGES.update.name:
            _revert_update(item)
        elif item.action == CHANGES.remove.name:
            _revert_remove(item)
        elif item.action == CHANGES.run.name:
            _revert_run(item)
        else:
            log.debug('Action %s for resource %s is a side'
                      ' effect of another action', item.action, item.resource)
def _revert_remove(logitem):
    """Resource should be created with all previous connections"""
    commited = CommitedResource.get(logitem.resource)
    args = dictdiffer.revert(logitem.diff, commited.inputs)
    connections = dictdiffer.revert(
        logitem.connections_diff, sorted(commited.connections))
    resource.Resource(logitem.resource, logitem.base_path,
                      args=_get_args_to_update(args, connections),
                      tags=commited.tags)
    for emitter, emitter_input, receiver, receiver_input in connections:
        emmiter_obj = resource.load(emitter)
        receiver_obj = resource.load(receiver)
        signals.connect(emmiter_obj, receiver_obj, {
                        emitter_input: receiver_input})
def _update_inputs_connections(res_obj, args, old_connections, new_connections):  # NOQA
    removed = []
    for item in old_connections:
        if item not in new_connections:
            removed.append(item)
    added = []
    for item in new_connections:
        if item not in old_connections:
            added.append(item)
    for emitter, _, receiver, _ in removed:
        emmiter_obj = resource.load(emitter)
        receiver_obj = resource.load(receiver)
        emmiter_obj.disconnect(receiver_obj)
    for emitter, emitter_input, receiver, receiver_input in added:
        emmiter_obj = resource.load(emitter)
        receiver_obj = resource.load(receiver)
        emmiter_obj.connect(receiver_obj, {emitter_input: receiver_input})
    if removed or added:
        # TODO: without this save we will get an error that some values
        # cannot be updated even though the connection was removed
        receiver_obj.db_obj.save()
    res_obj.update(args)
def _revert_update(logitem):
    """Revert of update should update inputs and connections"""
    res_obj = resource.load(logitem.resource)
    commited = res_obj.load_commited()
    connections = dictdiffer.revert(
        logitem.connections_diff, sorted(commited.connections))
    args = dictdiffer.revert(logitem.diff, commited.inputs)
    _update_inputs_connections(
        res_obj, _get_args_to_update(args, connections),
        commited.connections, connections)
def _revert_run(logitem):
    res_obj = resource.load(logitem.resource)
    res_obj.remove()
def revert(uid):
    return revert_uids([uid])
def _discard_remove(item):
    resource_obj = resource.load(item.resource)
    resource_obj.set_created()
def _discard_update(item):
    resource_obj = resource.load(item.resource)
    old_connections = resource_obj.connections
    new_connections = dictdiffer.revert(
        item.connections_diff, sorted(old_connections))
    args = dictdiffer.revert(item.diff, resource_obj.args)
    _update_inputs_connections(
        resource_obj, _get_args_to_update(args, new_connections),
        old_connections, new_connections)
def _discard_run(item):
    resource.load(item.resource).remove(force=True)
def discard_uids(uids):
    items = LogItem.multi_get(uids)
    for item in items:
        if item.action == CHANGES.update.name:
            _discard_update(item)
        elif item.action == CHANGES.remove.name:
            _discard_remove(item)
        elif item.action == CHANGES.run.name:
            _discard_run(item)
        else:
            log.debug('Action %s for resource %s is a side'
                      ' effect of another action', item.action, item.resource)
        item.delete()
def discard_uid(uid):
    return discard_uids([uid])
def discard_all():
    staged_log = data.SL()
    return discard_uids([l.uid for l in staged_log])
def commit_all():
    """Helper mainly for ease of testing"""
    from solar.system_log.operations import move_to_commited
    for item in data.SL():
        move_to_commited(item.log_action)
def clear_history():
    LogItem.delete_all()
    CommitedResource.delete_all()
 | |
| 
	# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A node transformer that includes utilities for SCT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from pyctr.core import anno
from pyctr.core import parsing
from pyctr.core import pretty_printer
from pyctr.sct import templates
# TODO(mdanatg): Use namedtuple.
class EntityInfo(object):
  """Contains information about a Python entity.
  Immutable.
  Examples of entities include functions and classes.
  Attributes:
    source_code: The entity's source code.
    source_file: The entity's source file.
    namespace: Dict[str, Any], containing symbols visible to the entity (excluding
      parameters).
    arg_values: dict[str->*], containing parameter values, if known.
    arg_types: dict[str->*], containing parameter types, if known.
    owner_type: The surrounding class type of the function, if present.
  """
  # TODO(mdanatg): Remove the default and update tests.
  def __init__(self, source_code, source_file, namespace, arg_values, arg_types,
               owner_type):
    self.source_code = source_code
    self.source_file = source_file
    self.namespace = namespace
    self.arg_values = {} if arg_values is None else arg_values
    self.arg_types = {} if arg_types is None else arg_types
    self.owner_type = owner_type
# TODO(jmd1011): Use namedtuple.
class Context(object):
  """Contains information about a source code transformation.
  This object is mutable, and is updated during conversion. Not thread safe.
  Attributes:
    info: EntityInfo, immutable.
    current_origin: origin_info.OriginInfo, holds the OriginInfo of the last
      AST node to be processed successfully. Useful for error handling.
  """
  def __init__(self, info):
    self.info = info
    self.current_origin = None
# TODO(jmd1011): Consolidate these Context objects with overloads.Overloads.
class EntityContext(Context):
  """Tracks the conversion of a single entity.
  This object is mutable, and is updated during conversion. Not thread safe.
  Attributes:
    namer: Namer
    info: transformer.EntityInfo
  """
  def __init__(self, namer, entity_info):
    super(EntityContext, self).__init__(entity_info)
    self.namer = namer
class _StateStack(object):
  """Typed stack abstraction.
  This class provides syntactic sugar for a stack of objects of known
  type. It allows accessing attributes of the object at the top of the stack
  directly against this object, which allows for very terse syntax.
  For example, this code:
    stack = _StateStack(Foo)
    stack.enter()
    stack.bar
  Is equivalent to:
    stack = []
    stack.append(Foo())
    foo = stack[-1]
    foo.bar
  See _State for more on how this is used.
  Attributes:
    type: Any, the type of objects that this stack holds
    level: int, the current stack depth
    value: Any, the instance of the object at the top of the stack
  """
  def __init__(self, type_):
    # Because we override __setattr__, we need to attach these attributes using
    # the superclass' setattr.
    object.__setattr__(self, 'type', type_)
    object.__setattr__(self, '_stack', [])
    if not hasattr(type_, 'no_root'):
      self.enter()
  def enter(self):
    self._stack.append(self.type())
  def exit(self):
    return self._stack.pop()
  @property
  def level(self):
    return len(self._stack)
  @property
  def value(self):
    return self._stack[-1]
  def __iter__(self):
    return iter(self._stack)
  def __getattr__(self, key):
    return getattr(self._stack[-1], key)
  def __setattr__(self, key, value):
    setattr(self._stack[-1], key, value)
class _State(object):
  """Supporting class for nested scope variable space for converter.Base.
  This structure offers syntactic sugar over a dict of stacks of objects
  of known type. These structures are useful to keep state during AST walks.
  Multiple different scopes can be tracked in parallel. For example:
    s = _State()
    s[foo].enter()
    s[bar].enter()  # this will not affect s[foo]
  Element access has special semantics:
    * keys are a data type
    * element values are _StateStack(type=key) objects
    * missing elements are automatically added, similarly to defaultdict
  For example, the following block :
    _State s
    s[Foo]
  Is equivalent to:
    s = {}
    if Foo not in s:
      s[Foo] = Foo()
    s[Foo]
  See Base for how it's used.
  """
  def __init__(self):
    self._value = {}
  def __getitem__(self, key):
    if key not in self._value:
      self._value[key] = _StateStack(key)
    return self._value[key]
class Base(gast.NodeTransformer):
  """Base class for general-purpose code transformers.
  This is an extension of gast.NodeTransformer that provides a few additional
  functions, like state tracking within the scope of arbitrary node, helpers
  for processing code blocks, debugging, mapping of transformed code to
  original code, and others.
  The transformer allows keeping state across calls to visit_* that is local to
  arbitrary nodes and their descendants, using the self.state attribute.
  Multiple independent scopes are allowed and automatically constructed.
  For example, to keep track of the If node that encloses any Name node, one can
  write:
    class FooType(object):
      def __init__(self):
        self.foo_property = None
    class DummyTransformer(Base):
      def visit_If(self, node):
        self.state[FooType].enter()
        self.state[FooType].foo_property = node
      def visit_Name(self, node):
        self.state[FooType].foo_property  # will hold the innermost enclosing if
  """
  # TODO(mdanatg): Document all extra features.
  def __init__(self, entity_info):
    """Initialize the transformer.
    Subclasses should call this.
    Args:
      entity_info: An EntityInfo object.
    """
    self._lineno = 0
    self._col_offset = 0
    # TODO(znado): remove this from the constructor of all Transformers.
    self.entity_info = entity_info
    self._enclosing_entities = []
    # Allows scoping of local variables to keep state across calls to visit_*
    # methods. Multiple scope hierarchies may exist and are keyed by tag. A
    # scope is valid at one or more nodes and all its children. Scopes created
    # in child nodes supersede their parent. Scopes are isolated from one
    # another.
    self.state = _State()
  @property
  def enclosing_entities(self):
    return tuple(self._enclosing_entities)
  def debug_print(self, node):
    """Helper method useful for debugging."""
    if __debug__:
      print(pretty_printer.fmt(node))
    return node
  def create_assignment(self, target, expression):
    template = """
      target = expression
    """
    return templates.replace(template, target=target, expression=expression)
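  # A minimal usage sketch (assuming pyctr's templates.replace mirrors the
  # AutoGraph helper it derives from): create_assignment(target_node, value_node)
  # expands the one-line template above and returns the resulting assignment
  # statement node(s) for `target = expression`.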
  def visit_block(self, nodes, before_visit=None, after_visit=None):
    """A more powerful version of generic_visit for statement blocks.
    An example of a block is the body of an if statement.
    This function allows specifying a postprocessing callback (the
    after_visit argument) argument which can be used to move nodes to a new
    destination. This is done by after_visit by returning a non-null
    second return value, e.g. return new_node, new_destination.
    For example, a transformer could perform the following move, rewriting:
        foo()
        bar()
        baz()
    as:
        foo()
        if cond:
          bar()
          baz()
    The above could be done with a postprocessor of this kind:
        def after_visit(node):
          if node_is_function_call(bar):
            new_container_node = build_cond()
            new_container_node.body.append(node)
            return new_container_node, new_container_node.body
          else:
            # Once we set a new destination, all subsequent items will be
            # moved to it, so we don't need to explicitly handle baz.
            return node, None
    Args:
      nodes: enumerable of AST node objects. If None, the function returns None.
      before_visit: optional callable that is called before visiting each item
        in nodes
      after_visit: optional callable that takes in an AST node and returns a
        tuple (new_node, new_destination). It is called after visiting each item
        in nodes. It is used in the same way as the
          visit_* methods: new_node will replace the node; if not None,
            new_destination must be a list, and subsequent nodes will be placed
            in this list instead of the list returned by visit_block.
    Returns:
      A list of AST node objects containing the transformed items from nodes,
      except those nodes that have been relocated using after_visit.
    """
    if nodes is None:
      return None
    results = []
    node_destination = results
    for node in nodes:
      if before_visit:
        # TODO(mdanatg): We can modify node here too, if ever needed.
        before_visit()
      replacement = self.visit(node)
      if after_visit and replacement:
        replacement, new_destination = after_visit(replacement)
      else:
        new_destination = None
      if replacement:
        if isinstance(replacement, (list, tuple)):
          node_destination.extend(replacement)
        else:
          node_destination.append(replacement)
      # Allow the postprocessor to reroute the remaining nodes to a new list.
      if new_destination is not None:
        node_destination = new_destination
    return results
  # TODO(mdanatg): Remove.
  def apply_to_single_assignments(self, targets, values, apply_fn):
    """Applies a function to each individual assignment.
    This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
    It tries to break down the unpacking if possible. In effect, it has the same
    effect as passing the assigned values in SSA form to apply_fn.
    Examples:
    The following will result in apply_fn(a, c), apply_fn(b, d):
        a, b = c, d
    The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):
        a, b = c
    The following will result in apply_fn(a, (b, c)):
        a = b, c
    It uses the visitor pattern to allow subclasses to process single
    assignments individually.
    Args:
      targets: list, tuple of or individual AST node. Should be used with the
        targets field of an ast.Assign node.
      values: an AST node.
      apply_fn: a function of a single argument, which will be called with the
        respective nodes of each single assignment. The signature is
        apply_fn(target, value), no return value.
    """
    if not isinstance(targets, (list, tuple)):
      targets = (targets,)
    for target in targets:
      if isinstance(target, (gast.Tuple, gast.List)):
        for i in range(len(target.elts)):
          target_el = target.elts[i]
          if isinstance(values, (gast.Tuple, gast.List)):
            value_el = values.elts[i]
          else:
            value_el = gast.Subscript(values, gast.Index(i), ctx=gast.Store())
          self.apply_to_single_assignments(target_el, value_el, apply_fn)
      else:
        # TODO(mdanatg): Look into allowing to rewrite the AST here.
        apply_fn(target, values)
  def _get_source(self, node):
    try:
      source, _ = parsing.ast_to_source(node)
      return source
    # pylint: disable=broad-except
    # This function is used for error reporting.  If an exception occurs here,
    # it should be suppressed, in favor of emitting as informative a message
    # about the original error as possible.
    except Exception:
      return '<could not convert AST to source>'
  def visit(self, node):
    if not isinstance(node, gast.AST):
      # This is not that uncommon a mistake: various node bodies are lists, for
      # example, posing a land mine for transformers that need to recursively
      # call `visit`.  The error needs to be raised before the exception handler
      # below is installed, because said handler will mess up if `node` is not,
      # in fact, a node.
      msg = ('invalid value for "node": expected "ast.AST", got "{}"; to'
             ' visit lists of nodes, use "visit_block" instead').format(
                 type(node))
      raise ValueError(msg)
    did_enter_function = False
    processing_expr_node = False
    if isinstance(node, (gast.FunctionDef, gast.ClassDef, gast.Lambda)):
      did_enter_function = True
    elif isinstance(node, gast.Expr):
      processing_expr_node = True
    if did_enter_function:
      self._enclosing_entities.append(node)
    if processing_expr_node:
      entry_expr_value = node.value
    if not anno.hasanno(node, anno.Basic.SKIP_PROCESSING):
      result = super(Base, self).visit(node)
    else:
      # Nodes marked SKIP_PROCESSING pass through unchanged, so `result` is
      # always defined for the post-processing below.
      result = node
    # Adjust for consistency: replacing the value of an Expr with
    # an Assign node removes the need for the Expr node.
    if processing_expr_node:
      if isinstance(result, gast.Expr) and result.value != entry_expr_value:
        # When the replacement is a list, it is assumed that the list came
        # from a template that contained a number of statements, which
        # themselves are standalone and don't require an enclosing Expr.
        if isinstance(result.value, (list, tuple, gast.Assign, gast.AugAssign)):
          result = result.value
    # On exception, the local scope integrity is not guaranteed.
    if did_enter_function:
      self._enclosing_entities.pop()
    return result
 | |
| 
	#!/usr/bin/python2.4
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oauth2client.file tests
Unit tests for oauth2client.file
"""
__author__ = '[email protected] (Joe Gregorio)'
import copy
import datetime
import json
import os
import pickle
import stat
import tempfile
import unittest
from .http_mock import HttpMockSequence
import six
from oauth2client import file
from oauth2client import locked_file
from oauth2client import multistore_file
from oauth2client import util
from oauth2client.client import AccessTokenCredentials
from oauth2client.client import OAuth2Credentials
from six.moves import http_client
try:
  # Python2
  from future_builtins import oct
except ImportError:
  pass
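# On Python 2 the builtin oct() renders file modes as '0600'; future_builtins.oct
# uses the Python 3 style '0o600', which is what the file-mode assertions below
# compare against.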
FILENAME = tempfile.mktemp('oauth2client_test.data')
class OAuth2ClientFileTests(unittest.TestCase):
  def tearDown(self):
    try:
      os.unlink(FILENAME)
    except OSError:
      pass
  def setUp(self):
    try:
      os.unlink(FILENAME)
    except OSError:
      pass
  def create_test_credentials(self, client_id='some_client_id',
                              expiration=None):
    access_token = 'foo'
    client_secret = 'cOuDdkfjxxnv+'
    refresh_token = '1/0/a.df219fjls0'
    token_expiry = expiration or datetime.datetime.utcnow()
    token_uri = 'https://www.google.com/accounts/o8/oauth2/token'
    user_agent = 'refresh_checker/1.0'
    credentials = OAuth2Credentials(
        access_token, client_id, client_secret,
        refresh_token, token_expiry, token_uri,
        user_agent)
    return credentials
  def test_non_existent_file_storage(self):
    s = file.Storage(FILENAME)
    credentials = s.get()
    self.assertEquals(None, credentials)
  def test_no_sym_link_credentials(self):
    if hasattr(os, 'symlink'):
      SYMFILENAME = FILENAME + '.sym'
      os.symlink(FILENAME, SYMFILENAME)
      s = file.Storage(SYMFILENAME)
      try:
        s.get()
        self.fail('Should have raised an exception.')
      except file.CredentialsFileSymbolicLinkError:
        pass
      finally:
        os.unlink(SYMFILENAME)
  def test_pickle_and_json_interop(self):
    # Write a file with a pickled OAuth2Credentials.
    credentials = self.create_test_credentials()
    f = open(FILENAME, 'wb')
    pickle.dump(credentials, f)
    f.close()
    # Storage should be not be able to read that object, as the capability to
    # read and write credentials as pickled objects has been removed.
    s = file.Storage(FILENAME)
    read_credentials = s.get()
    self.assertEquals(None, read_credentials)
    # Now write it back out and confirm it has been rewritten as JSON
    s.put(credentials)
    with open(FILENAME) as f:
      data = json.load(f)
    self.assertEquals(data['access_token'], 'foo')
    self.assertEquals(data['_class'], 'OAuth2Credentials')
    self.assertEquals(data['_module'], OAuth2Credentials.__module__)
  def test_token_refresh_store_expired(self):
    expiration = datetime.datetime.utcnow() - datetime.timedelta(minutes=15)
    credentials = self.create_test_credentials(expiration=expiration)
    s = file.Storage(FILENAME)
    s.put(credentials)
    credentials = s.get()
    new_cred = copy.copy(credentials)
    new_cred.access_token = 'bar'
    s.put(new_cred)
    access_token = '1/3w'
    token_response = {'access_token': access_token, 'expires_in': 3600}
    http = HttpMockSequence([
        ({'status': '200'}, json.dumps(token_response).encode('utf-8')),
    ])
    credentials._refresh(http.request)
    self.assertEquals(credentials.access_token, access_token)
  def test_token_refresh_store_expires_soon(self):
    # Tests the case where an access token that is valid when it is read from
    # the store expires before the original request succeeds.
    expiration = datetime.datetime.utcnow() + datetime.timedelta(minutes=15)
    credentials = self.create_test_credentials(expiration=expiration)
    s = file.Storage(FILENAME)
    s.put(credentials)
    credentials = s.get()
    new_cred = copy.copy(credentials)
    new_cred.access_token = 'bar'
    s.put(new_cred)
    access_token = '1/3w'
    token_response = {'access_token': access_token, 'expires_in': 3600}
    http = HttpMockSequence([
        ({'status': str(http_client.UNAUTHORIZED)}, b'Initial token expired'),
        ({'status': str(http_client.UNAUTHORIZED)}, b'Store token expired'),
        ({'status': str(http_client.OK)},
         json.dumps(token_response).encode('utf-8')),
        ({'status': str(http_client.OK)},
         b'Valid response to original request')
    ])
    credentials.authorize(http)
    http.request('https://example.com')
    self.assertEqual(credentials.access_token, access_token)
  def test_token_refresh_good_store(self):
    expiration = datetime.datetime.utcnow() + datetime.timedelta(minutes=15)
    credentials = self.create_test_credentials(expiration=expiration)
    s = file.Storage(FILENAME)
    s.put(credentials)
    credentials = s.get()
    new_cred = copy.copy(credentials)
    new_cred.access_token = 'bar'
    s.put(new_cred)
    credentials._refresh(lambda x: x)
    self.assertEquals(credentials.access_token, 'bar')
  def test_token_refresh_stream_body(self):
    expiration = datetime.datetime.utcnow() + datetime.timedelta(minutes=15)
    credentials = self.create_test_credentials(expiration=expiration)
    s = file.Storage(FILENAME)
    s.put(credentials)
    credentials = s.get()
    new_cred = copy.copy(credentials)
    new_cred.access_token = 'bar'
    s.put(new_cred)
    valid_access_token = '1/3w'
    token_response = {'access_token': valid_access_token, 'expires_in': 3600}
    http = HttpMockSequence([
        ({'status': str(http_client.UNAUTHORIZED)}, b'Initial token expired'),
        ({'status': str(http_client.UNAUTHORIZED)}, b'Store token expired'),
        ({'status': str(http_client.OK)},
         json.dumps(token_response).encode('utf-8')),
        ({'status': str(http_client.OK)}, 'echo_request_body')
    ])
    body = six.StringIO('streaming body')
    credentials.authorize(http)
    _, content = http.request('https://example.com', body=body)
    self.assertEqual(content, 'streaming body')
    self.assertEqual(credentials.access_token, valid_access_token)
  def test_credentials_delete(self):
    credentials = self.create_test_credentials()
    s = file.Storage(FILENAME)
    s.put(credentials)
    credentials = s.get()
    self.assertNotEquals(None, credentials)
    s.delete()
    credentials = s.get()
    self.assertEquals(None, credentials)
  def test_access_token_credentials(self):
    access_token = 'foo'
    user_agent = 'refresh_checker/1.0'
    credentials = AccessTokenCredentials(access_token, user_agent)
    s = file.Storage(FILENAME)
    credentials = s.put(credentials)
    credentials = s.get()
    self.assertNotEquals(None, credentials)
    self.assertEquals('foo', credentials.access_token)
    mode = os.stat(FILENAME).st_mode
    if os.name == 'posix':
      self.assertEquals('0o600', oct(stat.S_IMODE(os.stat(FILENAME).st_mode)))
  def test_read_only_file_fail_lock(self):
    credentials = self.create_test_credentials()
    open(FILENAME, 'a+b').close()
    os.chmod(FILENAME, 0o400)
    store = multistore_file.get_credential_storage(
        FILENAME,
        credentials.client_id,
        credentials.user_agent,
        ['some-scope', 'some-other-scope'])
    store.put(credentials)
    if os.name == 'posix':
      self.assertTrue(store._multistore._read_only)
    os.chmod(FILENAME, 0o600)
  def test_multistore_no_symbolic_link_files(self):
    if hasattr(os, 'symlink'):
      SYMFILENAME = FILENAME + 'sym'
      os.symlink(FILENAME, SYMFILENAME)
      store = multistore_file.get_credential_storage(
          SYMFILENAME,
          'some_client_id',
          'user-agent/1.0',
          ['some-scope', 'some-other-scope'])
      try:
        store.get()
        self.fail('Should have raised an exception.')
      except locked_file.CredentialsFileSymbolicLinkError:
        pass
      finally:
        os.unlink(SYMFILENAME)
  def test_multistore_non_existent_file(self):
    store = multistore_file.get_credential_storage(
        FILENAME,
        'some_client_id',
        'user-agent/1.0',
        ['some-scope', 'some-other-scope'])
    credentials = store.get()
    self.assertEquals(None, credentials)
  def test_multistore_file(self):
    credentials = self.create_test_credentials()
    store = multistore_file.get_credential_storage(
        FILENAME,
        credentials.client_id,
        credentials.user_agent,
        ['some-scope', 'some-other-scope'])
    store.put(credentials)
    credentials = store.get()
    self.assertNotEquals(None, credentials)
    self.assertEquals('foo', credentials.access_token)
    store.delete()
    credentials = store.get()
    self.assertEquals(None, credentials)
    if os.name == 'posix':
      self.assertEquals('0o600', oct(stat.S_IMODE(os.stat(FILENAME).st_mode)))
  def test_multistore_file_custom_key(self):
    credentials = self.create_test_credentials()
    custom_key = {'myapp': 'testing', 'clientid': 'some client'}
    store = multistore_file.get_credential_storage_custom_key(
        FILENAME, custom_key)
    store.put(credentials)
    stored_credentials = store.get()
    self.assertNotEquals(None, stored_credentials)
    self.assertEqual(credentials.access_token, stored_credentials.access_token)
    store.delete()
    stored_credentials = store.get()
    self.assertEquals(None, stored_credentials)
  def test_multistore_file_custom_string_key(self):
    credentials = self.create_test_credentials()
    # store with string key
    store = multistore_file.get_credential_storage_custom_string_key(
        FILENAME, 'mykey')
    store.put(credentials)
    stored_credentials = store.get()
    self.assertNotEquals(None, stored_credentials)
    self.assertEqual(credentials.access_token, stored_credentials.access_token)
    # try retrieving with a dictionary
    store_dict = multistore_file.get_credential_storage_custom_key(
        FILENAME, {'key': 'mykey'})
    stored_credentials = store_dict.get()
    self.assertNotEquals(None, stored_credentials)
    self.assertEqual(credentials.access_token, stored_credentials.access_token)
    store.delete()
    stored_credentials = store.get()
    self.assertEquals(None, stored_credentials)
  def test_multistore_file_backwards_compatibility(self):
    credentials = self.create_test_credentials()
    scopes = ['scope1', 'scope2']
    # store the credentials using the legacy key method
    store = multistore_file.get_credential_storage(
        FILENAME, 'client_id', 'user_agent', scopes)
    store.put(credentials)
    # retrieve the credentials using a custom key that matches the legacy key
    key = {'clientId': 'client_id', 'userAgent': 'user_agent',
           'scope': util.scopes_to_string(scopes)}
    store = multistore_file.get_credential_storage_custom_key(FILENAME, key)
    stored_credentials = store.get()
    self.assertEqual(credentials.access_token, stored_credentials.access_token)
  def test_multistore_file_get_all_keys(self):
    # start with no keys
    keys = multistore_file.get_all_credential_keys(FILENAME)
    self.assertEquals([], keys)
    # store credentials
    credentials = self.create_test_credentials(client_id='client1')
    custom_key = {'myapp': 'testing', 'clientid': 'client1'}
    store1 = multistore_file.get_credential_storage_custom_key(
        FILENAME, custom_key)
    store1.put(credentials)
    keys = multistore_file.get_all_credential_keys(FILENAME)
    self.assertEquals([custom_key], keys)
    # store more credentials
    credentials = self.create_test_credentials(client_id='client2')
    string_key = 'string_key'
    store2 = multistore_file.get_credential_storage_custom_string_key(
        FILENAME, string_key)
    store2.put(credentials)
    keys = multistore_file.get_all_credential_keys(FILENAME)
    self.assertEquals(2, len(keys))
    self.assertTrue(custom_key in keys)
    self.assertTrue({'key': string_key} in keys)
    # back to no keys
    store1.delete()
    store2.delete()
    keys = multistore_file.get_all_credential_keys(FILENAME)
    self.assertEquals([], keys)
if __name__ == '__main__':
  unittest.main()
 | |
| 
	# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import unittest
import os
import numpy as np
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.test_utils import assert_almost_equal
from common import setup_module, with_seed, assertRaises
from copy import deepcopy
from nose.tools import raises, assert_raises
@with_seed()
@raises(RuntimeError)
def test_multi_trainer():
    x = gluon.Parameter('x', shape=(10,), stype='row_sparse')
    x.initialize()
    # test set trainer
    trainer0 = gluon.Trainer([x], 'sgd')
    assert(x._trainer is trainer0)
    # test unset trainer
    x._set_trainer(None)
    assert(x._trainer is None)
    x._set_trainer(trainer0)
    # multiple trainers for a sparse Parameter are not allowed
    trainer1 = gluon.Trainer([x], 'sgd')
@with_seed()
def test_trainer():
    def dict_equ(a, b):
        assert set(a) == set(b)
        for k in a:
            assert (a[k].asnumpy() == b[k].asnumpy()).all()
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5})
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    assert trainer._optimizer.param_dict == trainer._optimizer.param_dict
    assert (x.data(mx.cpu(1)).asnumpy() == -2).all()
    x.lr_mult = 0.5
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    assert (x.data(mx.cpu(1)).asnumpy() == -4).all()
    trainer.save_states('test_trainer.states')
    states = deepcopy(trainer._kvstore._updater.states) if trainer._update_on_kvstore \
             else deepcopy(trainer._updaters[0].states)
    trainer.load_states('test_trainer.states')
    if trainer._update_on_kvstore:
        dict_equ(trainer._kvstore._updater.states, states)
        assert trainer._optimizer == trainer._kvstore._updater.optimizer
        # invalid usage of update and allreduce_grads if update_on_kvstore
        assert_raises(AssertionError, trainer.update, 1)
        assert_raises(AssertionError, trainer.allreduce_grads)
    else:
        for updater in trainer._updaters:
            dict_equ(updater.states, states)
        assert trainer._optimizer == trainer._updaters[0].optimizer
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer2 = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5},
                             update_on_kvstore=False)
    with mx.autograd.record():
        for i, w in enumerate(x.list_data()):
            y = i*w
            y.backward()
    assert (x.grad(mx.cpu(0)).asnumpy() != x.grad(mx.cpu(1)).asnumpy()).all()
    trainer2.allreduce_grads()
    assert (x.grad(mx.cpu(0)).asnumpy() == x.grad(mx.cpu(1)).asnumpy()).all()
    trainer2.update(1)
    assert (x.data(mx.cpu(1)).asnumpy() == -1).all(), x.data(mx.cpu(1)).asnumpy()
@with_seed()
def test_trainer_save_load():
    previous_update_on_kvstore = os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")
    os.putenv('MXNET_UPDATE_ON_KVSTORE', '1')
    x = gluon.Parameter('x', shape=(10,), lr_mult=1.0)
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 0.1})
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.1
    trainer.save_states('test_trainer_save_load.states')
    trainer.load_states('test_trainer_save_load.states')
    x.lr_mult = 2.0
    # check if parameter dict is correctly associated with optimizer after load_state
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.2
    os.putenv('MXNET_UPDATE_ON_KVSTORE', previous_update_on_kvstore)
@with_seed()
def test_trainer_sparse_save_load():
    x = gluon.Parameter('x', shape=(10, 1), lr_mult=1.0, stype='row_sparse')
    x.initialize(ctx=[mx.cpu(0)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 0.1})
    all_rows = mx.nd.arange(0, 10, ctx=mx.cpu(0))
    with mx.autograd.record():
        for w in x.list_row_sparse_data(all_rows):
            y = w * 1
            y.backward()
    trainer.step(1)
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.1
    trainer.save_states('test_trainer_sparse_save_load.states')
    trainer.load_states('test_trainer_sparse_save_load.states')
    x.lr_mult = 2.0
    # check if parameter dict is correctly associated with optimizer after load_state
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.2
@with_seed()
def test_trainer_multi_layer_init():
    class Net(gluon.Block):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                # sparse param
                self.embed_weight = self.params.get('embed_weight', stype='row_sparse',
                                                    shape=(4,3), grad_stype='row_sparse')
                # dense param from a hybrid block
                self.dense0 = nn.Dense(2)
        def forward(self, x):
            embed_weight = self.embed_weight.row_sparse_data(x)
            embed = mx.nd.Embedding(data=x, weight=embed_weight,
                                    input_dim=4, output_dim=3, sparse_grad=True)
            return self.dense0(embed)
    def check_init(ctxes):
        net = Net(prefix='net_')
        net.initialize(mx.init.One(), ctx=ctxes)
        trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 1})
        data = mx.nd.array([[0,2], [1,2]])
        xs = gluon.utils.split_and_load(data, ctxes)
        ys = []
        with mx.autograd.record():
            for x in xs:
                y = net(x)
                ys.append(y)
        for y in ys:
            y.backward()
        trainer.step(1)
        # all parameters should be initialized
        assert not trainer._params_to_init
        all_rows = mx.nd.arange(0, 4, ctx=mx.cpu(1))
        # check the updated weights
        weight = net.embed_weight.row_sparse_data(all_rows).asnumpy()
        assert (weight[0] == -1).all()
        assert (weight[1] == -1).all()
        assert (weight[2] == -3).all()
        assert (weight[3] == 1).all()
    check_init([mx.cpu(1), mx.cpu(2)])
    check_init([mx.cpu(1)])
@with_seed()
def test_trainer_reset_kv():
    def check_trainer_reset_kv(kv):
        params = gluon.ParameterDict()
        x = params.get('x', shape=(10,), lr_mult=1.0)
        params.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        trainer = gluon.Trainer(params, 'sgd', {'learning_rate': 0.1}, kvstore=kv)
        params.save('test_trainer_reset_kv.params')
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        assert trainer._kvstore.type == kv
        # load would reset kvstore
        mx.nd.waitall()
        params.load('test_trainer_reset_kv.params')
        if trainer._update_on_kvstore:
            # drop kvstore state if new parameters are loaded
            assert trainer._kvstore is None
            assert trainer._kv_initialized is False
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        # the updated parameter should be based on the loaded checkpoint
        assert (x.data(mx.cpu()) == -0.2).asnumpy().all()
    kvs = ['local', 'device']
    for kv in kvs:
        check_trainer_reset_kv(kv)
@with_seed()
def test_trainer_sparse_kv():
    def check_trainer_sparse_kv(kv, stype, grad_stype, update_on_kv, expected):
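        # `expected` is either the boolean value that trainer._update_on_kvstore
        # should take for this stype/update_on_kv combination, or an exception
        # class when the combination is expected to fail (see the ValueError
        # case in the calls below).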
        params = gluon.ParameterDict()
        x = params.get('x', shape=(10,1), lr_mult=1.0, stype=stype, grad_stype=grad_stype)
        params.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        trainer = gluon.Trainer(params, 'sgd', {'learning_rate': 0.1},
                                kvstore=kv, update_on_kvstore=update_on_kv)
        all_rows = mx.nd.arange(0, 10, ctx=mx.cpu(0))
        try:
            ws = x.list_data() if stype == 'default' else x.list_row_sparse_data(all_rows)
            with mx.autograd.record():
                for w in ws:
                    y = w + 1
                    y.backward()
            trainer.step(1)
            assert trainer._kvstore.type == kv
            assert trainer._kv_initialized
            assert trainer._update_on_kvstore is expected
            # the updated parameter should be based on the loaded checkpoint
            mx.nd.waitall()
            updated_w = x.data(mx.cpu(0)) if stype == 'default' else x.row_sparse_data(all_rows)
            assert (updated_w == -0.2).asnumpy().all()
        except Exception as err:
            assert isinstance(err, expected)
    kvs = ['local', 'device']
    global_update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
    for kv in kvs:
        check_trainer_sparse_kv(kv, 'default', 'default', True, True)
        check_trainer_sparse_kv(kv, 'default', 'default', False, False)
        check_trainer_sparse_kv(kv, 'default', 'default', None, global_update_on_kvstore)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', None, False)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', True, True)
        check_trainer_sparse_kv(kv, 'default', 'row_sparse', False, False)
        check_trainer_sparse_kv(kv, 'row_sparse', 'row_sparse', None, True)
        check_trainer_sparse_kv(kv, 'row_sparse', 'row_sparse', False, ValueError)
@with_seed()
def test_trainer_lr_sched():
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    freq = 2
    factor = 0.1
    lr = 1
    lr_sched = mx.lr_scheduler.FactorScheduler(freq, factor=factor, base_lr=lr)
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': lr, 'lr_scheduler': lr_sched})
    for i in range(10):
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        if i % freq == 0:
            assert trainer.learning_rate == lr, (lr, trainer.learning_rate, i)
            lr *= factor
    mx.nd.waitall()
    # Update on kvstore = False
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    freq = 2
    factor = 0.1
    lr = 1
    lr_sched = mx.lr_scheduler.FactorScheduler(freq, factor=factor, base_lr=lr)
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': lr, 'lr_scheduler': lr_sched},
                            update_on_kvstore=False)
    for i in range(10):
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        if i % freq == 0:
            assert trainer.learning_rate == lr, (lr, trainer.learning_rate, i)
            lr *= factor
    mx.nd.waitall()
 | |
| 
	
import enum
NAMESPACE_FIELD_XMLNS = "xmlns"
NAMESPACE_FIELD_XSI = "xsi"
NAMESPACE_FIELD_ATTRIB_CONTENT = "{0}:{1}".format(NAMESPACE_FIELD_XMLNS, NAMESPACE_FIELD_XSI)
TEST_TICKET_ID = "test"
SCRIPTED_CONDITION_TRUE = "<response><condition_result>true</condition_result></response>"
SCRIPTED_CONDITION_FALSE = "<response><condition_result>false</condition_result></response>"
SERVICE_OBJECT_TYPE_PREDEFINED = "PREDEFINED"
SERVICE_OBJECT_TYPE_PROTOCOL = "PROTOCOL"
SERVICE_OBJECT_TYPE_APPLICATION_IDENTITY = "APPLICATION_IDENTITY"
TYPE_ANY = "ANY"
TYPE_INTERNET = "INTERNET"
TYPE_ATTRIB = "type"
TYPE_DNS = "DNS"
TYPE_IP = "IP"
TYPE_OBJECT = "Object"
TYPE_HOST = "HOST"
TYPE_NETWORK = "NETWORK"
TYPE_OTHER = "OTHER"
TYPE_RANGE = "RANGE"
TYPE_LDAP_ENTITY = "LDAP_ENTITY"
XML_ATTRIB_IDENTIFIER = "_attribs"
XML_TAG_IDENTIFIER = "_xml_tag"
XSI_NAMESPACE_URL = "http://www.w3.org/2001/XMLSchema-instance"
class Attributes:
    ACL__BINDING = "acl__binding"
    BLOCK_CELL_VIOLATION = "blocked_cell_violation"
    BLOCK_ALL_CELL_VIOLATION = "blocked_all_cell_violation"
    BLOCK_ONLY_MATRIX_CELL_VIOLATION = "blocked_only_matrix_cell_violation"
    CLOUD_SECURITY_GROUP_NETWORK_OBJECT = "cloud_security_group"
    CONCRETE = "concrete"
    CUSTOM = "custom"
    DEVICE_NETWORK = "device_network"
    DEVICE_SERVICE = "device_service"
    DEVICE_ZONE = "zone"
    DNS = "dns"
    DOMAIN_NETWORK_OBJECT = "domain_network_object"
    FIELD_TYPE_APPROVE_REJECT = "approve_reject"
    FIELD_TYPE_CHECKBOX = "checkbox"
    FIELD_TYPE_DATE = "date"
    FIELD_TYPE_DROP_DOWN_LIST = "drop_down_list"
    FIELD_TYPE_HYPERLINK = "hyperlink"
    FIELD_TYPE_MANAGER = "manager"
    FIELD_TYPE_MULTIPLE_SELECTION = "multiple_selection"
    FIELD_TYPE_MULTI_ACCESS_REQUEST = "multi_access_request"
    FIELD_TYPE_MULTI_GROUP_CHANGE = "multi_group_change"
    FIELD_TYPE_MULTI_HYPERLINK = "multi_hyperlink"
    FIELD_TYPE_MULTI_NETWORK_OBJECT = "multi_network_object"
    FIELD_TYPE_MULTI_SERVICE = "multi_service"
    FIELD_TYPE_MULTI_SERVER_DECOMMISSION_REQUEST = "multi_server_decommission_request"
    FIELD_TYPE_MULTI_TARGET = "multi_target"
    FIELD_TYPE_MULTI_TEXT = "multi_text_field"
    FIELD_TYPE_MULTI_TEXT_AREA = "multi_text_area"
    FIELD_TYPE_RULE_DECOMMISSION = "rule_decommission"
    FIELD_TYPE_TEXT = "text_field"
    FIELD_TYPE_TEXT_AREA = "text_area"
    FIELD_TYPE_TIME = "time"
    FORTIGATE_NAT_INFO = "fortigateNatInfoDTO"
    GROUP = "group"
    HREF = "href"
    HOST_NETWORK_OBJECT = "host_network_object"
    HOST_NETWORK_OBJECT_WITH_INTERFACES = "host_network_object_with_interfaces"
    ICMP_SERVICE = "icmp_service"
    INTERFACE_TYPE = "interfaceDTO"
    INTERNET = "Internet"
    INTERNET_NETWORK_OBJECT = "internet_network_object"
    INSTALL_ON_NETWORK_OBJECT = "install_on_network_object"
    IP_SERVICE = "ip_service"
    NETWORK_OBJECT_GROUP = "network_object_group"
    NETWORK_OBJECT_TYPE_BASIC = "basicNetworkObjectDTO"
    NETWORK_OBJECT_TYPE_DOMAIN = "domainNetworkObjectDTO"
    NETWORK_OBJECT_TYPE_GROUP = "networkObjectGroupDTO"
    NETWORK_OBJECT_TYPE_HOST = "hostNetworkObjectDTO"
    NETWORK_OBJECT_TYPE_HOST_WITH_INTERFACES = "hostNetworkObjectWithInterfacesDTO"
    NETWORK_OBJECT_TYPE_RANGE = "rangeNetworkObjectDTO"
    NETWORK_OBJECT_TYPE_SUBNET = "subnetNetworkObjectDTO"
    NETWORK_OBJECT_TYPE_INTERNET = "internetNetworkObjectDTO"
    NETWORK_OBJECT_TYPE_CLOUD = "cloudSecurityGroupDTO"
    NETWORK_OBJECT_TYPE_VIRTUAL_SERVER = "networkObjectVirtualServerDTO"
    NETWORK_OBJECT_TYPE_VM_INSTANCE = "vmInstanceDTO"
    POLICY__BINDING = "policy__binding"
    POOL_MEMBER = "poolMember"
    POOL_MEMBERS = "poolMembers"
    PREDEFINED = "predefined"
    RANGE_NETWORK = "range_network"
    RANGE_NETWORK_OBJECT = "range_network_object"
    RANGE_SERVICE = "range_service"
    RESTRICTED_CELL_VIOLATION = 'restricted_cell_violation'
    SECURITY_ZONE_MATRIX = "securityZoneMatrixDTO"
    SECURITY_REQUIREMENT_TYPE_MATRIX = "matrixRequirementDTO"
    SECURITY_REQUIREMENT_TYPE_PCI = "pciRequirementDTO"
    SERVICE_GROUP = "service_group"
    SERVICE_TYPE_GROUP = "serviceGroupDTO"
    SERVICE_TYPE_SINGLE = "singleServiceDTO"
    SLIM_RULE_WITH_META_DATA = "slimRuleWithMetadataDTO"
    SUBNET = "subnet"
    SUBNET_NETWORK_OBJECT = "subnet_network_object"
    TRANSPORT_SERVICE = "transport_service"
    TYPE = ":type"
    USER_TYPE = "userObjectDTO"
    USER_TYPE_GROUP = "userObjectGroupDTO"
    VIOLATION_ANY_NETWORK_OBJECT = "any_network_object"
    VIOLATION_ANY_SERVICE = "any_service"
    VIOLATION_GROUP_NETWORK_OBJECT = "group_member_network_object"
    VIOLATION_IP_NETWORK_OBJECT = "ip_network_object"
    VIOLATION_GROUP_MEMBER_SERVICE_OBJECT = "group_member_service_object"
    VIOLATION_RANGE_NETWORK_OBJECT = "range_network_object"
    VIOLATION_SINGLE_NETWORK_OBJECT = "single_network_object"
    VIOLATION_SINGLE_SERVICE = "single_service"
    VIOLATION_SINGLE_SERVICE_OBJECT = "single_service_object"
    XSI_NAMESPACE_TYPE = "{{{0}}}type".format(XSI_NAMESPACE_URL)
    XSI_TYPE = "{0}:type".format(NAMESPACE_FIELD_XSI)
    ZONE = "zone"
    ZONE__BINDING = "zone__binding"
class Elements:
    CREATE_DATE = "createDate"
    ID = "id"
    REJECT_COMMENT = "reject_comment"
    TICKET_INFO = "ticket_info"
    UPDATE_DATE = "updateDate"
    CURRENT_STAGE = "current_stage"
    OPEN_REQUEST_STAGE = "open_request_stage"
    COMPLETION_DATA = "completion_data"
    ACCEPTINGRULESDTO = "acceptingRulesDTO"
    ACCESS_REQUEST = "access_request"
    ACCESS_REQUESTS = "access_requests"
    ACCESS_REQUEST_ID = "access_request_id"
    ACCESS_REQUEST_VERIFIER_RESULT = "access_request_verifier_result"
    ACCESS_TYPE = "access_type"
    ACL = "acl"
    ACL_NAME = "acl_name"
    ACTION = "action"
    ADDITIONAL_INFO = "additional_info"
    ADDITIONAL_PARAMETER = "additional_parameter"
    ADDITIONAL_PARAMETERS = "additional_parameters"
    ADDRESS = "address"
    ADDRESS_BOOK = "address_book"
    ADMIN = "admin"
    ADMIN_DOMAIN = "admin_domain"
    ADMINISTRATOR = "administrator"
    ALLOWED_SERVICE = "allowed_service"
    ALLOWED_SERVICES = "allowed_services"
    ANCESTOR_MANAGEMENT_ID = "ancestor_management_id"
    ANCESTOR_MANAGEMENT_NAME = "ancestor_management_name"
    ANCESTOR_MANAGEMENT_REVISION_ID = "ancestor_management_revision_id"
    ANY_ZONE = "any_zone"
    APP_NAME = "app_name"
    APPLICATION_NAME = "application_name"
    APP_OWNER = "app_owner"
    APPLICATION = "application"
    APPLICATION_ID = "application_id"
    APPLICATION_DETAILS = "application_details"
    APPLICATION_INTERFACE = "application_interface"
    APPLICATION_INTERFACES = "application_interfaces"
    APPLICATION_INTERFACE_ID = "application_interface_id"
    APPLICATION_PACK = "application_pack"
    APPLICATIONS = "applications"
    APPROVED = "approved"
    APPROVED_BY = "approved_by"
    ASSIGNED = "ASSIGNED"
    ASSIGNEE = "assignee"
    ASSIGNMENT = "assignment"
    ASSIGNEE_ID = "assignee_id"
    AUDITLOG = "auditLog"
    AUTHENTICATION_METHOD = "authentication_method"
    AUTHORIZATIONSTATUS = "authorizationStatus"
    AUTOMATIC = "automatic"
    AUTONAT = "autoNat"
    AVAILABILITY_ZONE = "availability_zone"
    BINDING = "binding"
    BINDING_NAME = "binding_name"
    BINDING_SUGGESTION = "binding_suggestion"
    BINDING_UID = "binding_uid"
    BINDINGS = "bindings"
    BINDING_AND_RULES = "binding_and_rules"
    BINDINGS_AND_RULES = "bindings_and_rules"
    BLOCKINGRULESDTO = "blockingRulesDTO"
    BLOCKSTART = "blockStart"
    BLOCKEND = "blockEnd"
    BLOCKED_SERVICE = "blocked_service"
    BLOCKED_SERVICES = "blocked_services"
    BNPP_REGION_REQUEST = "bnpp_region_request"
    BUSINESSOWNEREMAIL = "businessOwnerEmail"
    BUSINESSOWNERNAME = "businessOwnerName"
    BUSINESS_OWNERS = "business_owners"
    CHANGE_ACTION = "change_action"
    CHANGE_AUTHORIZATION = "change_authorization"
    CHANGE_AUTHORIZATION_BINDING = "change_authorization_binding"
    CHANGE_AUTHORIZATION_BINDINGS = "change_authorization_bindings"
    CHANGE_ID = "change_id"
    CHANGE_IMPLEMENTATION_STATUS = "change_implementation_status"
    CIDR = "cidr"
    CLASS_NAME = "class_name"
    CLEANUP = "cleanup"
    CLEANUPS = "cleanups"
    CLEANUP_SET = "cleanup_set"
    CLOUD_SECURITY_GROUP = "cloud_security_group"
    CLOUD_SECURITY_GROUPS = "cloud_security_groups"
    CODE = "code"
    COLOR = "color"
    COMMAND = "command"
    COMMANDS = "commands"
    COMMENT = "comment"
    COMMENTS = "comments"
    COMMUNITY = "community"
    COMMUNITIES = "communities"
    USE_TOPOLOGY = "use_topology"
    COMPLIANCE_POLICIES = "compliance_policies"
    COMPLIANCE_POLICY = "compliance_policy"
    COMPLIANCE_RULE = "compliance_rule"
    COMPLIANCE_RULES = "compliance_rules"
    CONNECTION = "connection"
    CONNECTION_EXTENDED = "connection_extended"
    CONNECTIONS_EXTENDED = "connections_extended"
    CONNECTIONS = "connections"
    CONNECTION_TO_APPLICATION = "connection_to_application"
    CONNECTION_TO_APPLICATIONS = "connection_to_applications"
    CONNECTIONS_TO_APPLICATIONS = "connections_to_applications"
    CONNECTION_TO_APPLICATION_PACK = "connection_to_application_pack"
    CONNECTION_TO_APPLICATION_PACKS = "connection_to_application_packs"
    CONNECTED_SERVERS = "connected_servers"
    CONTENT = "content"
    COUNT = "count"
    CP_INSPECT_STREAMING_NAME = "cp_inspect_streaming_name"
    CP_UID = 'cp_uid'
    CP_PROTOTYPE_NAME = "cp_prototype_name"
    CREATED = "created"
    CREATED_BY = "created_by"
    CREATION_DATE = "creation_date"
    CURRENT_STEP = "current_step"
    CUSTOMER = "customer"
    CUSTOMER_ID = "customer_id"
    CUSTOMERS = "customers"
    DATE = "date"
    DCR_PRODUCT = "dcr_product"
    DCR_TEST_CONCRETE = "dcr_test_concrete"
    DCR_TEST_GROUP = "dcr_test_group"
    DCR_TEST = "dcr_test"
    DCR_TESTS = "dcr_tests"
    DECOMMISSIONED = "decommissioned"
    DEFAULT = "default"
    DESCRIPTION = "description"
    DESIGNER_COMMAND = "designer_command"
    DESIGNER_COMMANDS = "designer_commands"
    DESIGNER_RESULT = "designer_result"
    DESIGNER_RESULTS = "designer_results"
    DESTINATION = "destination"
    DESTINATION_DOMAIN = "destination_domain"
    DESTINATIONS = "destinations"
    DESTINATIONS_IN_ZONE = "destinations_in_zone"
    DEST_NETWORKS_NEGATED = "dest_networks_negated"
    DESTINATIONSERVICES = "destinationServices"
    DEST_NETWORK_COLLECTION = "dest_network_collection"
    DESTNETWORKS = "destinationNetworks"
    DESTNEGATED = "destNegated"
    DEST_SERVICES_NEGATED = "dest_services_negated"
    DEVICE = "device"
    DEVICE_NAME = "name"
    DEVICE_INFO = "device_info"
    DEVICE_TYPE = "device_type"
    DEVICES = "devices"
    DEVICES_AND_BINDINGS = "devices_and_bindings"
    DEVICE_AND_BINDINGS = "device_and_bindings"
    DEVICE_CONFIG = "device_config"
    DEVICE_ID = "device_id"
    DEVICE_IDS = "device_ids"
    DEVICE_SUGGESTION = "device_suggestion"
    DEVICE_ZONES = "zones"
    DEVICE_ZONE = "zone"
    DEVICE_ADDED_NETWORK_OBJECT = "device_added_network_object"
    DEVICE_ADDED_SERVICE_OBJECT = "device_added_service_object"
    SOURCE_OBJECT = "source_object"
    SOURCE_OBJECTS = "source_objects"
    DESTINATION_OBJECT = "destination_object"
    DESTINATION_OBJECTS = "destination_objects"
    DIRECTION = "direction"
    DISABLED = "disabled"
    DISPLAYNAME = "displayName"
    DISPLAY_NAME = "display_name"
    DM_INLINE_MEMBRES = "DM_INLINE_members"
    DNSADDRESS = "dnsAddress"
    DNS_IP_ADDRESSES = 'dns_ip_addresses'
    DST = "dst"
    DOCUMENTATION = "documentation"
    RULE_DOCUMENTATION = "rule_documentation"
    DOMAIN = "domain"
    DOMAINS = "domains"
    DOMAIN_ID = "domain_id"
    DOMAIN_NAME = "domain_name"
    DST_NAT_METHOD = "dstNatMethod"
    DST_NETWORK = "dst_network"
    DST_NETWORKS = "dst_networks"
    DST_SERVICE = "dst_service"
    DST_SERVICES = "dst_services"
    DST_ZONE = "dst_zone"
    EDIT_MODE = "edit_mode"
    EDITORS = "editors"
    EDITOR = "editor"
    EMAIL = "email"
    ENABLE_NET_4_TO_NET_6 = "enable_net4tonet6"
    ENABLE_ROUTE_LOOKUP = "enable_route_lookup"
    ENGRESS_INTERFACE = "egress_interface"
    ENFORCEDON = "enforcedOn"
    EXCLUDE_ANY = "exclude_any"
    EXCLUSION = "exclusion"
    EXCLUSIONS = "exclusions"
    EXEMPTED_TRAFFIC = "exempted_traffic"
    EXEMPTED_TRAFFIC_LIST = "exempted_traffic_list"
    EXPIRATION_DATE = "expiration_date"
    EXPIRATION_FIELD_NAME = "expiration_field_name"
    EXPIREDATE = "expireDate"
    EXPRESSION = "expression"
    EXTERNAL = "external"
    FIELD = "field"
    FIELDS = "fields"
    FIRST_IP = "first_ip"
    FIRST_NAME = "first_name"
    FORTI_VIP = "forti_vip"
    FROM = "from"
    FLOW = "flow"
    FLOW_DESTINATION = 'flow_destination'
    FLOW_DESTINATION_VIOLATIONS = "flow_destination_violations"
    FLOW_DESTINATIONS = 'flow_destinations'
    FLOW_SOURCE = "flow_source"
    FLOW_SOURCE_VIOLATIONS = "flow_source_violations"
    FLOW_SOURCES = "flow_sources"
    FROM_DOMAIN = "from_domain"
    FROM_ZONE = "from_zone"
    F5_DEVICE_NAME = "f5_device_name"
    GENERIC_DEVICES = "generic_devices"
    GLOBAL = "global"
    GROUP = "group"
    GROUP_CHANGE = "group_change"
    GROUP_ID = "group_id"
    GROUP_IDS = "group_ids"
    GROUP_MEMBER = "group_member"
    GROUP_MEMBER_PATH = "group_member_path"
    GROUPPERMISSION = "groupPermission"
    GROUPPERMISSIONS = "groupPermissions"
    GROUPID = "groupId"
    GUICLIENT = "guiClient"
    HANDLE_IMPLICIT_CLEANUP_RULE = "handled_by_implicit_cleanup_rule"
    HOST_NAME = "host_name"
    HYPERLINK = "hyperlink"
    ID = "id"
    IMPLEMENTATION_PERCENTAGE_THRESHOLD = "implementation_percentage_threshold"
    IMPLEMENTATION_STATUS = "implementation_status"
    IMPLEMENTING_RULE = "implementing_rule"
    IMPLEMENTING_RULES = "implementing_rules"
    IMPLEMENTS_ACCESS_REQUESTS = "implements_access_requests"
    IMPLICIT = "implicit"
    INCOMING_INTERFACE_NAME = "incoming_interface_name"
    INCOMINGINTERFACES = "incomingInterfaces"
    INCOMINGVRF = "incomingVrf"
    INDOMAINELEMENTID = "inDomainElementId"
    INPUT = "input"
    INSTALL = "install"
    INSTALLABLE_TARGET = "installable_target"
    INSTALL_ON = "install_on"
    INSTALL_ONS = "install_ons"
    INSTALLED_ON_MODULE = "installed_on_module"
    INSTANCE_ID = "instance_id"
    INSTANCES_TOTAL = "instances_total"
    INSTRUCTION = "instruction"
    INSTRUCTION_TYPE = "instruction_type"
    INSTRUCTIONS = "instructions"
    INTERFACE = "interface"
    INTERFACES = "interfaces"
    INTERFACE_FOR_NETWORK_OBJECT = "interface_for_network_object"
    INTERFACE_CONNECTION = "interface_connection"
    INTERFACE_CONNECTIONS = "interface_connections"
    INTERFACE_IP = "interface_ip"
    INTERFACE_IPS = "interface_ips"
    INTERFACE_MAPPING = "interface_mapping"
    INTERFACE_MAPPINGS = "interface_mappings"
    INTERFACE_NAME = "interface_name"
    INTERNET = "internet"
    INTERNET_REFERRAL_OBJECT = "internet_referral_object"
    INTERVAL = "interval"
    IP = "ip"
    IPSECLIST = "ipsecList"
    IP_ADDRESS = "ip_address"
    IP_TYPE = "ip_type"
    IS_ANY = "is_any"
    ISACTIVE = "isActive"
    ISCUSTOM = "isCustom"
    ISDEFAULT = "isDefault"
    ISDISABLED = "is_disabled"
    ISMANDATORY = "isMandatory"
    IS_PUBLISHED = "is_published"
    ITG = "itg"
    ITG_ID = "itg_id"
    KEY = "key"
    LABEL = "label"
    LABELS = "labels"
    LAST_HIT = "last_hit"
    LAST_IP = "last_ip"
    LAST_MODIFIED = "last_modified"
    LAST_NAME = "last_name"
    LDAP_ENTITY_ID = "ldap_entity_id"
    LDAP_ENTITY_DN = "ldap_entity_dn"
    LDAP_ENTITY_NAME = "ldap_entity_name"
    LDAPDN = "ldapDn"
    LDAP_CONFIGURATION = "ldap_configuration"
    LEGACY_RULE = "legacy_rule"
    LEVEL = "level"
    LINK = "link"
    MANAGEMENTID = "managementId"
    MANAGEMENT_DOMAIN = "management_domain"
    MANAGEMENT_ID = "management_id"
    MANAGEMENT_IP = "management_ip"
    MANAGEMENT_NAME = "management_name"
    MANAGEMENT_TYPE = "management_type"
    MASK = "mask"
    MAPPED_IP = "mapped-ip"
    MAPPED_IP_MAX = "mapped-ip-max"
    MATCH_FOR_ANY = "match_for_any"
    MATCH_RULE = "match_rule"
    MATRIX_CELL_VIOLATION = "matrix_cell_violation"
    MAX = "max"
    MAX_PROTOCOL = "max_protocol"
    MAX_VALUE_SOURCE = "max_value_source"
    MAXIP = "maxIp"
    MAX_IP = "max_ip"
    MAX_ICMP_TYPE = "max_icmp_type"
    MAX_PORT = "max_port"
    MEMBER = "member"
    MEMBER_OF = "member_of"
    MEMBERS = "members"
    MESSAGE = "message"
    MGMT_ANY = "mgmt_any"
    MGMT = "mgmt"
    MGMTS = "mgmts"
    MGMT_ID = "mgmt_id"
    MGMT_NAME = "mgmt_name"
    MIN = "min"
    MINIP = "minIp"
    MIN_IP = "min_ip"
    MIN_ICMP_TYPE = "min_icmp_type"
    MIN_PORT = "min_port"
    MIN_PROTOCOL = "min_protocol"
    MIN_VALUE_SOURCE = "min_value_source"
    MODEL = "model"
    MODIFIED = "modified"
    MODIFIED_OBJECT_NAME = "modified_object_name"
    MODULE = "module"
    MODULE_AND_POLICY = "module_and_policy"
    MODULES_AND_POLICY = "modules_and_policy"
    MPLSINPUTLABEL = "mplsInputLabel"
    MPLSOUTPUTLABEL = "mplsOutputLabel"
    MULTI_ACCESS_REQUESTS = "multi_access_requests"
    MULTI_GROUP_CHANGE = "multi_group_change"
    MUSTCONTAIN = "mustContain"
    NAME = "name"
    NAT_INFO = "nat_info"
    NATLIST = "natList"
    NAT_RULE = "nat_rule"
    NAT_RULES = "nat_rules"
    NEGATE = "negate"
    NEGATED = "negated"
    NETMASK = "netmask"
    NETWORK = "network"
    NETWORK_ADDRESS = "network_address"
    NETWORK_COLLECTION = "network_collection"
    NETWORK_IP = "network_ip"
    NETWORK_ITEM = "network_item"
    NETWORK_ITEMS = "network_items"
    NETWORK_MASK = "network_mask"
    NETWORK_NAME = "network_name"
    NETWORK_OBJECT = "network_object"
    NETWORK_OBJECTS = "network_objects"
    NETWORKS = "networks"
    NETWORK_UID = "network_uid"
    NEW_REVISION = "new_revision"
    NEW_RULE = "new_rule"
    NEW_RULES = "new_rules"
    NEW_RULES_VIOLATED_TRAFFIC = "new_rules_violated_traffic"
    NEXT_HOP_IP = "next_hop_ip"
    NEXTHOPIP = "nextHopIp"
    NEXTDEVICES = "nextDevices"
    NOT_ALLOWED_SERVICE = "not_allowed_service"
    NOT_ALLOWED_SERVICES = "not_allowed_services"
    NOT_BLOCKED_SERVICE = "not_blocked_service"
    NOT_BLOCKED_SERVICES = "not_blocked_services"
    NOTES = "notes"
    NUMBER = "number"
    NUMBER_OF_RULES = "number_of_rules"
    OBJECT = "object"
    OBJECT_DETAILS = "object_details"
    OBJECT_NAME = "object_name"
    OBJECTNAMES = "objectNames"
    OBJECT_TYPE = "object_type"
    OBJECT_UID = "object_UID"
    OBJECT_UPDATED_STATUS = "object_updated_status"
    OFFLINE = "offline"
    OFFLINE_DEVICE = "offline_device"
    OLD_REVISION = "old_revision"
    OLD_RULE = "old_rule"
    OLD_RULES = "old_rules"
    OLD_RULES_VIOLATED_TRAFFIC = "old_rules_violated_traffic"
    OPEN_TICKETS = "open_tickets"
    OPTIONS = "options"
    OPTION = "option"
    ORDER = "order"
    ORIG_DST_NETWORK = "orig_dst_network"
    ORIG_SRC_NETWORK = "orig_src_network"
    ORIG_SERVICE = "orig_service"
    ORIGIN = "origin"
    ORIGINAL_INSTANCE_ID = "original_instance_id"
    ORIGINALIPS = "originalIps"
    ORIGINALSERVICES = "originalServices"
    OUTGOING_INTERFACE_NAME = "outgoing_interface_name"
    OUTGOINGINTERFACE = "outgoingInterface"
    OUTGOINGINTERFACENAME = "outgoingInterfaceName"
    OUTGOINGVRF = "outgoingVrf"
    OUT_OF_OFFICE_FROM = "out_of_office_from"
    OUT_OF_OFFICE_UNTIL = "out_of_office_until"
    OWNER = "owner"
    OWNER_USER_ID = "owner_user_id"
    OWNER_DISPLAY_NAME = "owner_display_name"
    PATH = "path"
    PATH_CALC_RESULTS = "path_calc_results"
    PARENT_ID = "parent_id"
    PARTIAL_LIST = "partial_list"
    PARTICIPANT_USERNAME = "participant_username"
    PARTICIPANT_ID = "participant_id"
    PARTICIPANTS = "participants"
    PARTICIPATINGGATEWAYS = "participatingGateways"
    PAYLOAD = "payload"
    PEER = "peer"
    PERMISSIVENESS_LEVEL = "permissiveness_level"
    PHONE = "phone"
    POLICIES = "policies"
    POLICY = "policy"
    POLICYPACKAGE = "policyPackage"
    POLICYRULENUMBER = "policyRuleNumber"
    POLICY_ANALYSIS_QUERY_RESULT = "policy_analysis_query_result"
    POLICY_NAME = "policy_name"
    POLICY_CONTROL_NAME = "policy_control_name"
    POLICY_ZONE_PAIR = "policy_zone_pair"
    POOL_MEMBER = "pool_member"
    POOL_MEMBERS = "pool_members"
    PORT = "port"
    PORT_FROM = "port_from"
    PERCENT_IMPLEMENTED = "percent_implemented"
    PERFORMED_BY = "performed_by"
    PRECEDENCE = "precedence"
    PREDEFINED_NAME = "predefined_name"
    PREDEFINED_SERVICE_ID = "predefined_service_id"
    PREDEFINED_SERVICE_NAME = "predefined_service_name"
    PREDEFINED_SERVICE_RANGE = "predefined_service_range"
    PREDEFINED_SERVICE_RANGES = "predefined_service_ranges"
    PRENAT_IFACE = "prenat_iface"
    POSTNAT_IFACE = "postnat_iface"
    PREFIX = "prefix"
    PRIMARY = "primary"
    PRIORITY = "priority"
    PRODUCTS = "products"
    PROTOCOL = "protocol"
    PUSH_STATUS = "push_status"
    QUERY_PARAMS = "query_params"
    RANGE_FIRST_IP = "range_first_ip"
    RANGE_LAST_IP = "range_last_ip"
    READY = "ready"
    READ_ONLY = "read_only"
    REASON = "reason"
    REASSIGN_TASK_COMMENT = "reassign_task_comment"
    RECIPIENT = "recipient"
    RECIPIENTS = "recipients"
    RECIPIENTS_ANY = "recipients_any"
    RECORD_SET = "record_set"
    REDONE = "redone"
    REDO_STEP_COMMENT = "redo_step_comment"
    REFERENCED = "referenced "
    REGION = "region"
    REMEDIATION = "remediation"
    REPORT = "report"
    REPORTS = "reports"
    RESULT = "result"
    REQUESTED_BY = "requested_by"
    REQUESTER = "requester"
    REQUESTER_ID = "requester_id"
    REVISION = "revision"
    REVISION_NUMBER = "revision_number"
    REVISION_ID = "revision_id"
    REVISIONID = "revisionId"
    REVISIONS = "revisions"
    RISK = "risk"
    RISK_ANALYSIS_RESULT = "risk_analysis_result"
    ROLE = "role"
    ROLES = "roles"
    ROUTE = "route"
    ROUTEDESTINATION = "routeDestination"
    ROUTES = "routes"
    RULE = "rule"
    RULES = "rules"
    RULE_COUNT = "rule_count"
    RULE_DESCRIPTION = "rule_description"
    RULE_ID = "rule_id"
    RULEIDENTIFIER = "ruleIdentifier"
    RULE_LOCATION = "rule_location"
    RULE_METADATA = "rule_metadata"
    RULE_NUMBER = "rule_number"
    RULE_ORDER = "rule_order"
    RULE_PLACMENT = "rule_placement"
    RULE_TYPE = "rule_type"
    RULE_VIOLATED_TRAFFIC = "rule_violated_traffic"
    RULENUMBER = "ruleNumber"
    RULE_PROPERTIES = "rule_properties"
    RULE_PROPERTY = "rule_property"
    RULE_PROPERTIES_VIOLATIONS = "rule_properties_violations"
    RULE_PROPERTY_VIOLATION = "rule_property_violation"
    RULE_TEXT = "textual_rep"
    SATELLITEGATEWAYS = "satelliteGateways"
    SECURE_APP_APPLICATION = "secure_app_application"
    SEQNUMBER = "seqNumber"
    SCORE = "score"
    SCHEDULING = "scheduling"
    SECURITY_GROUP = "security_group"
    SECURITY_GROUPS = "security_groups"
    SECURITY_POLICY = "securityPolicy"
    SECURITY_POLICY_LIST = "SecurityPolicyList"
    SECURITY_POLICY_DEVICE_VIOLATIONS = "security_policy_device_violations"
    SECURITY_POLICY_EXCEPTION = "security_policy_exception"
    SECURITY_POLICY_EXCEPTION_LIST = "security_policy_exception_list"
    SECURITY_POLICY_VIOLATION = "security_policy_violation"
    SECURITY_POLICY_VIOLATIONS = "security_policy_violations"
    SECURITY_REQUIREMENT = "security_requirement"
    SECURITY_REQUIREMENTS = "security_requirements"
    SECURITY_RULE_COUNT = "security_rule_count"
    SECURITY_ZONE_MATRIX = "security_zone_matrix"
    SELECTED_OPTION = "selected_option"
    SELECTED_OPTIONS = "selected_options"
    SELECTION = "selection"
    SEND_EMAIL = "send_email"
    SERVER = "server"
    SERVERS = "servers"
    SERVER_DECOMMISSION_REQUEST = "server_decommission_request"
    SERVICE = "service"
    SERVICENEGATED = "serviceNegated"
    SERVICES = "services"
    SERVICE_COLLECTION = "service_collection"
    SERVICE_ITEM = "service_item"
    SERVICE_ITEMS = "service_items"
    SERVICE_NAME = "service_name"
    SERVICE_UID = "service_uid"
    SERVICENATMETHOD = "serviceNatMethod"
    SEVERITY = "severity"
    SHADOWED_RULE = "shadowed_rule"
    SHADOWED_RULES = "shadowed_rules"
    SHADOWED_RULES_CLEANUP = "shadowed_rules_cleanup"
    SHADOWED_STATUS = "shadowed_status"
    SHADOWING_RULES = "shadowing_rules"
    SHARED = "shared"
    SKIPPED = "skipped"
    SLA_OUTCOME = "sla_outcome"
    SLA_STATUS = "sla_status"
    SOURCE = "source"
    SOURCES = "sources"
    SOURCEIP = "sourceIp"
    SOURCE_DOMAIN = "source_domain"
    SOURCE_NETWORK_COLLECTION = "source_network_collection"
    SOURCES_IN_ZONE = "sources_in_zone"
    SOURCENETWORKS = "sourceNetworks"
    SOURCENEGATED = "sourceNegated"
    SOURCESERVICES = "sourceServices"
    SRC = "src"
    SRC_NETWORK = "src_network"
    SRC_NETWORKS = "src_networks"
    SRC_NETWORKS_NEGATED = "src_networks_negated"
    SRC_SERVICES_NEGATED = "src_services_negated"
    SRC_ZONE = "src_zone"
    SRCNATMETHOD = "srcNatMethod"
    STATUS = "status"
    STEP = "step"
    STEP_NAME = "step_name"
    STEPS = "steps"
    SUB_POLICY = "sub_policy"
    SUB_POLICY_GLOBAL = "sub_policy_global"
    SUB_POLICY_SHARED = "sub_policy_shared"
    SUB_POLICY_UID = "sub_policy_uid"
    SUB_POLICY_NAME = "sub_policy_name"
    SUBNET = "subnet"
    SUBNET_MASK = "subnet_mask"
    SUBJECT = "subject"
    SUGGESTIONS_PER_BINDING = "suggestions_per_binding"
    SUGGESTIONS_PER_DEVICE = "suggestions_per_device"
    TAG = "tag"
    TAG_SERVERS = "tag_servers"
    TAGS = "tags"
    TAGS_SERVERS = "tags_servers"
    TARGET = "target"
    TARGETS = "targets"
    TASK = "task"
    TASK_NAME = "task_name"
    TASKS = "tasks"
    TECH_OWNER = "tech_owner"
    TESTDEF = "testDef"
    TESTDEFUID = "testDefUid"
    TESTPARAMS = "testParams"
    TEST_PRODUCTS = "test_products"
    TESTUID = "testUid"
    TEXT = "text"
    TEXT_AREA = "text_area"
    TEXT_FIELD = "text_field"
    TICKET = "ticket"
    TICKETS = "tickets"
    TICKET_HISTORY_ACTIVITIES = "ticket_history_activities"
    TICKET_HISTORY_ACTIVITY = "ticket_history_activity"
    TICKET_ID = "ticket_id"
    TICKET_IDS = "ticket_ids"
    TICKETCR = "ticketCr"
    TIME = "time"
    TIMEOUT = "timeout"
    TOPOLOGY = "topology"
    TO = "to"
    TO_DOMAIN = "to_domain"
    TO_ZONE = "to_zone"
    TOTAL = "total"
    TOPOLOGY_CLOUD = "topology_cloud"
    TOPOLOGY_CLOUDS = "topology_clouds"
    TRACK = "track"
    TRACK_LEVEL = "track_level"
    TRACK_INTERVAL = "track_interval"
    TRAFFIC_RANGE = "traffic_range"
    TRAFFIC_ALLOWED = "traffic_allowed"
    TRANSLATEDIPS = "translatedIps"
    TRANSLATEDSERVICES = "translatedServices"
    TRANSLATED_SERVICE = "translated_service"
    TRANSLATED_DST_NETWORK = "translated_dst_network"
    TRANSLATED_SRC_NETWORK = "translated_src_network"
    TYPE = "type"
    TYPE_ON_DEVICE = "type_on_device"
    UID = "uid"
    UNAUTHORIZED_OPENED_ACCESS = "unauthorized_opened_access"
    UNAUTHORIZED_CLOSED_ACCESS = "unauthorized_closed_access"
    UNIQUE_ACTIVE_IN_ITG = "unique_active_in_itg"
    UPDATE_RULE = "update_rule"
    URL = "url"
    USER = "user"
    USER_ID = "user_id"
    USER_NETWORKS = "userNetworks"
    USERS = "users"
    USAGE_MODE = "usage_mode"
    VALUE = "value"
    VENDOR = "vendor"
    VENDOR_NAME = "vendor_name"
    VERIFIED = "verified"
    VERIFICATION_STATUS = "verification_status"
    VERIFIER_BINDING = "verifier_binding"
    VERIFIER_BINDINGS = "verifier_bindings"
    VERIFIER_RESULT = "verifier_result"
    VERIFIER_TARGET = "verifier_target"
    VERIFIER_TARGETS = "verifier_targets"
    VERIFIER_WARNING = "verifier_warning"
    VERSION_ID = "version_id"
    VIEWER = "viewer"
    VIEWERS = "viewers"
    VISIBILITY = "visibility"
    VIOLATED_TRAFFIC = "violated_traffic"
    VIOLATING_SERVICE = "violating_service"
    VIOLATING_SERVICES = "violating_services"
    VIOLATION = "violation"
    VIOLATIONS = "violations"
    VIOLATING_OBJECT = "violating_object"
    VIOLATING_RULE = "violating_rule"
    VIOLATING_RULES = "violating_rules"
    VIRTUAL_IP = "virtual_ip"
    VIRTUAL_ROUTER = "virtual_router"
    VM_INSTANCE = "vm_instance"
    VM_INSTANCES = "vm_instances"
    VPN = "vpn"
    VPNCONNECTION = "vpnConnection"
    WORKFLOW = "workflow"
    ZONE = "zone"
    ZONES = "zones"
    ZONE_ID = "zone_id"
    ZONEID = "zoneId"
    ZONE_ENTRY = "zone_entry"
    ZONE_ENTRIES = "zone_entries"
    ZONE_NAME = "zone_name"
    ZONE_NAME_IN_PARENT = "zone_name_in_parent"
    ZONE_TO_ZONE_SECURITY_REQUIREMENT = "zone_to_zone_security_requirement"
@enum.unique
class SeverityLevels(enum.Enum):
    CRITICAL = "CRITICAL"
    HIGH = "HIGH"
    MEDIUM = "MEDIUM"
    LOW = "LOW"
    @staticmethod
    def from_string(level_string):
        """
        Get enum from string
        :type level_string: str
        :return:
        """
        try:
            level = [level for level in (SeverityLevels.CRITICAL, SeverityLevels.HIGH,
                                         SeverityLevels.MEDIUM, SeverityLevels.LOW) if level.value == level_string][0]
        except IndexError:
            raise ValueError("String {} doesn't represent any of the severity levels".format(level_string))
        else:
            return level
    def _compare_with_enum(self, level):
        """
        Check if greater than other provided violation severity level
        :type level: SeverityLevels
        :return:
        """
        if self == SeverityLevels.CRITICAL and level == SeverityLevels.CRITICAL:
            return False
        elif self == SeverityLevels.HIGH and level in (SeverityLevels.HIGH, SeverityLevels.CRITICAL):
            return False
        elif self == SeverityLevels.MEDIUM and level in (SeverityLevels.CRITICAL,
                                                         SeverityLevels.HIGH, SeverityLevels.MEDIUM):
            return False
        if self == SeverityLevels.LOW:
            return False
        return True
    def __gt__(self, level):
        """
        Implementation of greater
        :param level:
        :return:
        """
        if isinstance(level, str):
            level = SeverityLevels.from_string(level)
            return self._compare_with_enum(level)
        elif isinstance(level, SeverityLevels):
            return self._compare_with_enum(level)
        else:
            raise TypeError("Wrong type to compare")
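# Illustrative usage sketch (not part of the original class). It shows how the
# string constructor and the comparison defined above interact; the literal
# strings below are examples only.
#
#     level = SeverityLevels.from_string("HIGH")
#     level > SeverityLevels.LOW              # True  -- HIGH is more severe
#     level > "CRITICAL"                      # False -- plain strings work too
#     SeverityLevels.from_string("BOGUS")     # raises ValueError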
 | |
| 
	#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Extract information from several wordnet formats
Latest version can be found at https://github.com/letuananh/yawlib
@author: Le Tuan Anh <[email protected]>
'''
# Copyright (c) 2016, Le Tuan Anh <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = "Le Tuan Anh <[email protected]>"
__copyright__ = "Copyright 2016, yawlib"
__credits__ = []
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Le Tuan Anh"
__email__ = "<[email protected]>"
__status__ = "Prototype"
import sys
import os.path
import argparse
import itertools
import logging
import csv
from collections import defaultdict as dd
from collections import namedtuple
from difflib import ndiff, unified_diff
from operator import itemgetter
from lxml import etree
from chirptext.leutile import StringTool
from chirptext.leutile import Counter
from chirptext.leutile import Timer
from chirptext.leutile import uniquify
from chirptext.leutile import header
from chirptext.leutile import FileHelper
from yawlib import YLConfig
from yawlib import SynsetID
from yawlib.helpers import dump_synset
from yawlib.helpers import dump_synsets
from yawlib.helpers import get_gwn, get_wn, get_gwnxml
from yawlib.helpers import config_logging, add_logging_config
from yawlib.helpers import add_wordnet_config
from yawlib.helpers import show_info
from yawlib import XMLGWordNet
from yawlib import SQLiteGWordNet
from yawlib import WordnetSQL as WSQL
try:
    from fuzzywuzzy import fuzz
except Exception as e:
    logging.warning("fuzzywuzzy is not installed")
    pass
#-----------------------------------------------------------------------
# CONFIGURATION
#-----------------------------------------------------------------------
# >>> WARNING: Do NOT change these values here. Change config.py instead!
#
WORDNET_30_PATH          = YLConfig.WORDNET_30_PATH
WORDNET_30_GLOSSTAG_PATH = YLConfig.WORDNET_30_GLOSSTAG_PATH
WORDNET_30_GLOSS_DB_PATH = YLConfig.WORDNET_30_GLOSS_DB_PATH
DB_INIT_SCRIPT           = YLConfig.DB_INIT_SCRIPT
MOCKUP_SYNSETS_DATA      = FileHelper.abspath('data/test.xml')
GLOSSTAG_NTUMC_OUTPUT    = FileHelper.abspath('data/glosstag_ntumc')
GLOSSTAG_PATCH           = FileHelper.abspath('data/glosstag_patch.xml')
glosstag_files = lambda x : [
    os.path.join(x, 'adv.xml')
    ,os.path.join(x, 'adj.xml')
    ,os.path.join(x, 'verb.xml')
    ,os.path.join(x, 'noun.xml')
    ]
MERGED_FOLDER            = os.path.join(WORDNET_30_GLOSSTAG_PATH , 'merged')
GLOSSTAG_XML_FILES       = glosstag_files(MERGED_FOLDER)
MISALIGNED               = FileHelper.abspath('data/misaligned.xml')
#-----------------------------------------------------------------------
def glosstag2ntumc(args):
    print("Extracting Glosstag to NTU-MC")
    show_info(args)
    print("To be developed")
    pass
def export_wn_synsets(args):
    '''Extract information from different wordnets to compare'''
    if args.source == 'gloss':
        export_gwnsql_synsets(args)
    else:
        export_wnsql_synsets(args)
def export_gwnsql_synsets(args):
    print("Exporting synsets' info (lemmas/defs/examples) from GlossWordNet (SQLite) to text file")
    show_info(args)
    output_with_sid_file = os.path.abspath('./data/glosstag_lemmas.txt')
    output_without_sid_file = os.path.abspath('./data/glosstag_lemmas_noss.txt')
    output_defs = os.path.abspath('./data/glosstag_defs.txt')
    output_exes = os.path.abspath('./data/glosstag_exes.txt')
    gwn = get_gwn(args)
    # Extract synsets' lemmas, definitions and examples
    if args.mockup:
        synsets = get_gwnxml(args).synsets
    else:
        synsets = gwn.all_synsets()
    synsets.synsets.sort(key=lambda x: x.sid.to_canonical())
    with open(output_defs, 'w') as def_file, open(output_exes, 'w') as ex_file, open(output_with_sid_file, 'w') as with_sid, open(output_without_sid_file, 'w') as without_sid:
        # synsets = gwn.get_synsets_by_ids(['01828736-v', '00001740-r'])
        for ss in synsets:
            for t in sorted(ss.terms, key=lambda x: x.term):
                with_sid.write('%s\t%s\n' % (ss.sid.to_canonical(), t.term))
                without_sid.write('%s\n' % (t.term,))
            for gloss in ss.glosses:
                if gloss.cat == 'def':
                    def_file.write('{sid}\t{d}\n'.format(sid=ss.sid, d=gloss.text()))
                elif gloss.cat == 'ex':
                    ex_file.write('{sid}\t{ex}\n'.format(sid=ss.sid, ex=gloss.text()))
    # summary
    print("Data has been extracted to:")
    print("  + {}".format(output_with_sid_file))
    print("  + {}".format(output_without_sid_file))
    print("  + {}".format(output_defs))
    print("  + {}".format(output_exes))
    print("Extracted synsets: {}".format(len(synsets)))
    print("Done!")
def export_wnsql_synsets(args):
    print("Exporting synsets' info (lemmas/defs/examples) from WordnetSQL (Princeton Wordnet 3.0) to text file")
    show_info(args)
    output_with_sid_file = os.path.abspath('./data/wn30_lemmas.txt')
    output_without_sid_file = os.path.abspath('./data/wn30_lemmas_noss.txt')
    output_defs = os.path.abspath('./data/wn30_defs.txt')
    output_exes = os.path.abspath('./data/wn30_exes.txt')
    wn = get_wn(args)
    # Extract lemmas
    records = wn.get_all_synsets()
    synsets_lemmas = []
    for r in records:
        synsets_lemmas.append((SynsetID.from_string(str(r.synsetid)).to_canonical(), r.lemma))
    synsets_lemmas.sort(key=itemgetter(0, 1))
    with open(output_with_sid_file, 'w') as with_sid, open(output_without_sid_file, 'w') as without_sid:
        for row in synsets_lemmas:
            with_sid.write('%s\t%s\n' % row)
            without_sid.write('%s\n' % (row[1],))  # just the lemma
    # Extract synset definitions
    records = wn.schema.ss.select(orderby='synsetid')
    synsets_defs = []
    for r in records:
        synsets_defs.append((SynsetID.from_string(r.synsetid).to_canonical(), r.definition))
    synsets_defs.sort(key=itemgetter(0))
    with open(output_defs, 'w') as def_file:
        for row in synsets_defs:
            def_file.write('%s\t%s\n' % row)
    # Extract examples
    records = wn.schema.ex.select(orderby='synsetid')
    synsets_examples = []
    for r in records:
        synsets_examples.append((SynsetID.from_string(r.synsetid).to_canonical(), r.sample))
    synsets_examples.sort(key=itemgetter(0))
    with open(output_exes, 'w') as ex_file:
        for row in synsets_examples:
            ex_file.write('%s\t%s\n' % row)
    # summary
    print("Data has been extracted to:")
    print("  + {}".format(output_with_sid_file))
    print("  + {}".format(output_without_sid_file))
    print("  + {}".format(output_defs))
    print("  + {}".format(output_exes))
    print("Done!")
# wordnet data
class WNData:
    def __init__(self, profile, folder='./data/'):
        self.profile = profile
        self.lemmas_file = os.path.join(folder, '{}_lemmas.txt'.format(profile))
        self.defs_file = os.path.join(folder, '{}_defs.txt'.format(profile))
        self.exes_file = os.path.join(folder, '{}_exes.txt'.format(profile))
        self.sids = set()
        self.lemma_map = dd(set)
        self.lemmas = []
        self.def_map = {}
        self.defs = []
        self.exes = []
    def read(self):
        self.read_lemmas()
        self.read_defs()
        return self
    def read_lemmas(self):
        print("Reading file: {}".format(self.lemmas_file))
        with open(self.lemmas_file) as lmfile:
            self.lemmas = list(csv.reader(lmfile, dialect='excel-tab'))
            for sid, lemma in self.lemmas:
                self.sids.add(sid)
                # self.lemma_map[sid].add(lemma)
                self.lemma_map[sid].add(lemma.lower())
            print("Lemma count: {}".format(len(self.lemmas)))
            print("Synset count: {}".format(len(self.lemma_map)))
    def fix_def(self, definition):
        if definition.endswith(';'):
            definition = definition[:-1]
        definition = definition.replace('( ', '(').replace(' )', ')').replace(' , ', ', ')
        return definition
    def read_defs(self):
        print("Reading file: {}".format(self.defs_file))
        with open(self.defs_file) as deffile:
            self.defs = list(csv.reader(deffile, dialect='excel-tab'))
            for sid, definition in self.defs:
                if sid in self.def_map:
                    print("WARNING: multiple definition found for {}".format(sid))
                self.def_map[sid] = self.fix_def(definition)
        print("Def count: {}".format(len(self.defs)))
    def get_sid(self, sid):
        if sid in self.sids:
            return sid
        # IDs may end in -r in one wordnet and -s in the other (inconsistent
        # convention), so try the sibling ID before giving up
        trysid = None
        if sid.endswith('r'):
            trysid = sid[:-1] + 's'
        elif sid.endswith('s'):
            trysid = sid[:-1] + 'r'
        if trysid and trysid in self.sids:
            return trysid
        return None
    def compare_to(self, other_wn):
        c = Counter()
        for sid in self.sids:
            other_sid = sid
            if sid not in other_wn.lemma_map:
                if sid.endswith('r'):
                    # inconsistent convention (-r = -s)
                    other_sid = sid[:-2] + '-s'
                    if other_sid not in other_wn.sids:
                        print("sid {sid} cannot be found in [{prof}]".format(sid=sid, prof=other_wn.profile))
                        c.count("synset diff")
                        continue
            # we found this sid ...
            if self.lemma_map[sid] != other_wn.lemma_map[other_sid]:
                c.count('Lemma diff')
                print("{}: [{}] vs [{}]".format(sid, self.lemma_map[sid], other_wn.lemma_map[other_sid]))
        # compare defs
        diffs = set()  # store all observed definition differences
        for sid in self.sids:
            other_sid = other_wn.get_sid(sid)
            if not other_sid:
                print("Cannot find definition for {}".format(sid))
                continue
            mydef = self.def_map[sid]
            odef = other_wn.def_map[other_sid]
            if fuzz.ratio(mydef, odef) < 100:
                diff = simple_diff(mydef, odef)
                if ''.join(set(diff)) not in '(-_ \'";.':
                    diffs.add(diff)
                    c.count("def diff")
                    print("{prof} ({sid}): {mydef}\n{otherprof} ({osid}): {otherdef}\ndiff: {diff}--".format(prof=self.profile, sid=sid, mydef=mydef, otherprof=other_wn.profile, osid=other_sid, otherdef=odef, diff=diff))
            else:
                c.count("def same")
        print('-' * 30)
        with open(os.path.abspath('./data/diffs.txt'), 'w') as outfile:
            outfile.write('\n'.join(diffs))
        print('\n'.join(sorted(diffs)))
        c.summarise()
def simple_diff(a, b):
    '''Extract the difference between two strings'''
    diffs = unified_diff(a, b)
    parts = []
    for d in [x for x in diffs if x[0] in '-+?' and x.strip() not in ['---', '+++']]:
        parts.append(d[1:].strip())
    return ''.join(parts)
def compare_wordnets(args):
    gwn = WNData('glosstag').read()
    wn30 = WNData('wn30').read()
    # compare wordnets
    gwn.compare_to(wn30)
# ----------------------------------------------------------------------
def main():
    '''Main entry
    '''
    # It's easier to create a user-friendly console application by using argparse
    # See reference at the top of this script
    parser = argparse.ArgumentParser(description="WordNet Toolkit - For accessing and manipulating WordNet")
    add_wordnet_config(parser)
    add_logging_config(parser)
    tasks = parser.add_subparsers(title='task', help='Task to be performed')
    cmd_glosstag2ntumc = tasks.add_parser('g2n', help='Export Glosstag data to NTU-MC')
    cmd_glosstag2ntumc.set_defaults(func=glosstag2ntumc)
    cmd_extract = tasks.add_parser('x', help='Extract XML synsets from glosstag')
    cmd_extract.add_argument('source', nargs='?', help='Which Wordnet to be used (wnsql, gloss, etc.)')
    cmd_extract.set_defaults(func=export_wn_synsets)
    cmd_compare = tasks.add_parser('compare', help='Compare extracted wordnets')
    cmd_compare.set_defaults(func=compare_wordnets)
    cmd_info = tasks.add_parser('info', help='Show configuration information')
    cmd_info.set_defaults(func=show_info)
    # Parse input arguments
    if len(sys.argv) > 1:
        args = parser.parse_args()
        config_logging(args)
        args.func(args)
    else:
        parser.print_help()
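# Illustrative command lines (this_script.py is a placeholder for the actual
# file name; further options come from add_wordnet_config()/add_logging_config()
# and depend on the local configuration):
#
#     python this_script.py info          # show configuration information
#     python this_script.py x gloss       # export synsets from GlossWordNet (SQLite)
#     python this_script.py x wnsql       # export synsets from Princeton WordNet 3.0
#     python this_script.py compare       # compare the two extracted wordnets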
if __name__ == "__main__":
    main()
 | |
| 
	import copy
import datetime
import json
import os
import threading
import time
from email.utils import formatdate, parsedate_tz, mktime_tz
from mailpile.crypto.streamer import EncryptingStreamer, DecryptingStreamer
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.util import EventRLock, EventLock, CleanText, json_helper
from mailpile.util import safe_remove
EVENT_COUNTER_LOCK = threading.Lock()
EVENT_COUNTER = 0
def NewEventId():
    """
    This is guaranteed to generate unique event IDs for up to 1 million
    events per second. Beyond that, all bets are off. :-P
    """
    global EVENT_COUNTER
    with EVENT_COUNTER_LOCK:
        EVENT_COUNTER = EVENT_COUNTER+1
        EVENT_COUNTER %= 0x100000
        return '%8.8x.%5.5x.%x' % (time.time(), EVENT_COUNTER, os.getpid())
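# Note on the ID layout (illustrative): the format string above packs the
# current UNIX time, the per-process counter and the PID into three hex
# fields, producing something like '4f93b2c1.0002a.1f40'. Within one process
# the IDs therefore sort roughly chronologically, which
# EventLog._list_logfiles() further down relies on when ordering log files.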
def _ClassName(obj):
    if isinstance(obj, (str, unicode)):
        return str(obj).replace('mailpile.', '.')
    elif hasattr(obj, '__classname__'):
        return str(obj.__classname__).replace('mailpile.', '.')
    else:
        return str(obj.__class__).replace('mailpile.', '.')
class Event(object):
    """
    This is a single event in the event log. Actual interpretation and
    rendering of events should be handled by the respective source class.
    """
    RUNNING = 'R'
    COMPLETE = 'c'
    INCOMPLETE = 'i'
    FUTURE = 'F'
    # For now these live here, we may templatize this later.
    PREAMBLE_HTML = '<ul class="events">'
    PUBLIC_HTML = ('<li><span class="event_date">%(date)s</span> '
                   '<b class="event_message">%(message)s</b></li>')
    PRIVATE_HTML = PUBLIC_HTML
    POSTAMBLE_HTML = '</ul>'
    @classmethod
    def Parse(cls, json_string):
        try:
            return cls(*json.loads(json_string))
        except:
            return cls()
    def __init__(self,
                 ts=None, event_id=None, flags='c', message='',
                 source=None, data=None, private_data=None):
        self._data = [
            '',
            event_id or NewEventId(),
            flags,
            message,
            _ClassName(source),
            data or {},
            private_data or {},
        ]
        self._set_ts(ts or time.time())
    def __str__(self):
        return json.dumps(self._data, default=json_helper)
    def _set_ts(self, ts):
        if hasattr(ts, 'timetuple'):
            self._ts = int(time.mktime(ts.timetuple()))
        elif isinstance(ts, (str, unicode)):
            self._ts = int(mktime_tz(parsedate_tz(ts)))
        else:
            self._ts = float(ts)
        self._data[0] = formatdate(self._ts)
    def _set(self, col, value):
        self._set_ts(time.time())
        self._data[col] = value
    def _get_source_class(self):
        try:
            module_name, class_name = CleanText(self.source,
                                                banned=CleanText.NONDNS
                                                ).clean.rsplit('.', 1)
            if module_name.startswith('.'):
                module_name = 'mailpile' + module_name
            module = __import__(module_name, globals(), locals(), class_name)
            return getattr(module, class_name)
        except (ValueError, AttributeError, ImportError):
            return None
    date = property(lambda s: s._data[0], lambda s, v: s._set_ts(v))
    ts = property(lambda s: s._ts, lambda s, v: s._set_ts(v))
    event_id = property(lambda s: s._data[1], lambda s, v: s._set(1, v))
    flags = property(lambda s: s._data[2], lambda s, v: s._set(2, v))
    message = property(lambda s: s._data[3], lambda s, v: s._set(3, v))
    source = property(lambda s: s._data[4],
                      lambda s, v: s._set(4, _ClassName(v)))
    data = property(lambda s: s._data[5], lambda s, v: s._set(5, v))
    private_data = property(lambda s: s._data[6], lambda s, v: s._set(6, v))
    source_class = property(_get_source_class)
    def as_dict(self, private=True):
        try:
            return self.source_class.EventAsDict(self, private=private)
        except (AttributeError, NameError):
            data = {
                'ts': self.ts,
                'date': self.date,
                'event_id': self.event_id,
                'message': self.message,
                'flags': self.flags,
                'source': self.source,
                'data': self.data
            }
            if private:
                data['private_data'] = self.private_data
            return data
    def as_json(self, private=True):
        try:
            return self.source_class.EventAsJson(self, private=private)
        except (AttributeError, NameError):
            return json.dumps(self.as_dict(private=private))
    def as_html(self, private=True):
        try:
            return self.source_class.EventAsHtml(self, private=private)
        except (AttributeError, NameError):
            if private:
                return self.PRIVATE_HTML % self.as_dict(private=True)
            else:
                return self.PUBLIC_HTML % self.as_dict(private=False)
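# Minimal usage sketch (not part of the original module); the values are
# examples only. Event instances serialize to a single JSON line, which is
# the form EventLog writes to disk and parses back.
#
#     ev = Event(message='Hello world', flags=Event.RUNNING, data={'n': 1})
#     line = str(ev)               # JSON list: date, id, flags, message, ...
#     again = Event.Parse(line)    # rebuilds an equivalent Event
#     ev.flags = Event.COMPLETE    # property setters also refresh the timestamp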
class EventLog(object):
    """
    This is the Mailpile Event Log.
    The log is written encrypted to disk on an ongoing basis (rotated
    every N lines), but entries are kept in RAM as well. The event log
    allows for recording of incomplete events, to help different parts
    of the app "remember" tasks which have yet to complete or may need
    to be retried.
    """
    KEEP_LOGS = 2
    def __init__(self, logdir, decryption_key_func, encryption_key_func,
                 rollover=1024):
        self.logdir = logdir
        self.decryption_key_func = decryption_key_func or (lambda: None)
        self.encryption_key_func = encryption_key_func or (lambda: None)
        self.rollover = rollover
        self._events = {}
        # Internals...
        self._waiter = threading.Condition(EventRLock())
        self._lock = EventLock()
        self._log_fd = None
    def _notify_waiters(self):
        with self._waiter:
            self._waiter.notifyAll()
    def wait(self, timeout=None):
        with self._waiter:
            self._waiter.wait(timeout)
    def _save_filename(self):
        return os.path.join(self.logdir, self._log_start_id)
    def _open_log(self):
        if self._log_fd:
            self._log_fd.close()
        if not os.path.exists(self.logdir):
            os.mkdir(self.logdir)
        self._log_start_id = NewEventId()
        enc_key = self.encryption_key_func()
        if enc_key:
            self._log_fd = EncryptingStreamer(enc_key,
                                              dir=self.logdir,
                                              name='EventLog/ES',
                                              long_running=True)
            self._log_fd.save(self._save_filename(), finish=False)
        else:
            self._log_fd = open(self._save_filename(), 'wb', 0)
        # Write any incomplete events to the new file
        for e in self.incomplete():
            self._log_fd.write('%s\n' % e)
        # We're starting over, incomplete events don't count
        self._logged = 0
    def _maybe_rotate_log(self):
        if self._logged > self.rollover:
            self._log_fd.close()
            kept_events = {}
            for e in self.incomplete():
                kept_events[e.event_id] = e
            self._events = kept_events
            self._open_log()
            self.purge_old_logfiles()
    def _list_logfiles(self):
        return sorted([l for l in os.listdir(self.logdir)
                       if not l.startswith('.')])
    def _save_events(self, events, recursed=False):
        if not self._log_fd:
            self._open_log()
        events.sort(key=lambda ev: ev.ts)
        try:
            for event in events:
                self._log_fd.write('%s\n' % event)
                self._events[event.event_id] = event
        except IOError:
            if recursed:
                raise
            else:
                self._unlocked_close()
                return self._save_events(events, recursed=True)
    def _load_logfile(self, lfn):
        enc_key = self.decryption_key_func()
        with open(os.path.join(self.logdir, lfn)) as fd:
            if enc_key:
                with DecryptingStreamer(fd, mep_key=enc_key,
                                        name='EventLog/DS') as streamer:
                    lines = streamer.read()
                    streamer.verify(_raise=IOError)
            else:
                lines = fd.read()
            if lines:
                for line in lines.splitlines():
                    event = Event.Parse(line)
                    self._events[event.event_id] = event
    def _match(self, event, filters):
        for kw, rule in filters.iteritems():
            if kw.endswith('!'):
                truth, okw, kw = False, kw, kw[:-1]
            else:
                truth, okw = True, kw
            if kw == 'source':
                if truth != (event.source == _ClassName(rule)):
                    return False
            elif kw == 'flag':
                if truth != (rule in event.flags):
                    return False
            elif kw == 'flags':
                if truth != (event.flags == rule):
                    return False
            elif kw == 'event_id':
                if truth != (event.event_id == rule):
                    return False
            elif kw == 'since':
                when = float(rule)
                if when < 0:
                    when += time.time()
                if truth != (event.ts > when):
                    return False
            elif kw.startswith('data_'):
                if truth != (str(event.data.get(kw[5:])) == str(rule)):
                    return False
            elif kw.startswith('private_data_'):
                if truth != (str(event.data.get(kw[13:])) == str(rule)):
                    return False
            else:
                # Unknown keywords match nothing...
                print 'Unknown keyword: `%s=%s`' % (okw, rule)
                return False
        return True
    def incomplete(self, **filters):
        """Return all the incomplete events, in order."""
        if 'event_id' in filters:
            ids = [filters['event_id']]
        else:
            ids = sorted(self._events.keys())
        for ek in ids:
            e = self._events.get(ek, None)
            if (e is not None and
                    Event.COMPLETE not in e.flags and
                    self._match(e, filters)):
                yield e
    def since(self, ts, **filters):
        """Return all events since a given time, in order."""
        if ts < 0:
            ts += time.time()
        if 'event_id' in filters and filters['event_id'][:1] != '!':
            ids = [filters['event_id']]
        else:
            ids = sorted(self._events.keys())
        for ek in ids:
            e = self._events.get(ek, None)
            if (e is not None and
                    e.ts >= ts and
                    self._match(e, filters)):
                yield e
    def events(self, **filters):
        return self.since(0, **filters)
    def get(self, event_id, default=None):
        return self._events.get(event_id, default)
    def log_event(self, event):
        """Log an Event object."""
        with self._lock:
            self._save_events([event])
            self._logged += 1
            self._maybe_rotate_log()
            self._notify_waiters()
        return event
    def log(self, *args, **kwargs):
        """Log a new event."""
        return self.log_event(Event(*args, **kwargs))
    def close(self):
        with self._lock:
            return self._unlocked_close()
    def _unlocked_close(self):
        try:
            self._log_fd.close()
            self._log_fd = None
        except (OSError, IOError):
            pass
    def _prune_completed(self):
        for event_id in self._events.keys():
            if Event.COMPLETE in self._events[event_id].flags:
                del self._events[event_id]
    def load(self):
        with self._lock:
            self._open_log()
            for lf in self._list_logfiles()[-4:]:
                try:
                    self._load_logfile(lf)
                except (OSError, IOError):
                    # Nothing we can do, no point complaining...
                    pass
            self._prune_completed()
            self._save_events(self._events.values())
            return self
    def purge_old_logfiles(self, keep=None):
        keep = keep or self.KEEP_LOGS
        for lf in self._list_logfiles()[:-keep]:
            try:
                safe_remove(os.path.join(self.logdir, lf))
            except OSError:
                pass
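# Minimal usage sketch (not part of the original module). The two key
# functions may return None to disable encryption, and the log directory is
# a placeholder path.
#
#     log = EventLog('/tmp/eventlog-demo',
#                    lambda: None, lambda: None,   # decryption/encryption keys
#                    rollover=1024).load()
#     ev = log.log(message='Fetching mail', flags=Event.RUNNING)
#     for pending in log.incomplete(source=ev.source):
#         pending.flags = Event.COMPLETE
#         log.log_event(pending)
#     recent = list(log.since(-3600))              # events from the last hour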
 | |
| 
	# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Changing field 'Attachment.attachment'
        db.alter_column('courses_attachment', 'attachment', self.gf('django.db.models.fields.files.FileField')(max_length=200))
    def backwards(self, orm):
        # Changing field 'Attachment.attachment'
        db.alter_column('courses_attachment', 'attachment', self.gf('django.db.models.fields.files.FileField')(max_length=100))
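    # Illustrative only: with South installed this migration would normally be
    # applied or reverted via the usual management commands, e.g.
    #     python manage.py migrate courses
    #     python manage.py migrate courses <previous_migration_number>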
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254'})
        },
        'badges.alignment': {
            'Meta': {'object_name': 'Alignment'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'badges.badge': {
            'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'Badge'},
            'alignments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'alignments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Alignment']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'criteria': ('django.db.models.fields.URLField', [], {'max_length': '255'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'tags'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['badges.Tag']"}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'badges.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'courses.announcement': {
            'Meta': {'ordering': "('-datetime',)", 'object_name': 'Announcement'},
            'content': ('tinymce.models.HTMLField', [], {}),
            'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']", 'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'courses.attachment': {
            'Meta': {'object_name': 'Attachment'},
            'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']"})
        },
        'courses.course': {
            'Meta': {'ordering': "['order']", 'object_name': 'Course'},
            'background': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'certification_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'certification_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'completion_badge': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'course'", 'null': 'True', 'to': "orm['badges.Badge']"}),
            'created_from': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'courses_created_of'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['courses.Course']"}),
            'description': ('tinymce.models.HTMLField', [], {}),
            'ects': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '8'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'enrollment_method': ('django.db.models.fields.CharField', [], {'default': "'free'", 'max_length': '200'}),
            'estimated_effort': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'external_certification_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'forum_slug': ('django.db.models.fields.CharField', [], {'max_length': '350', 'null': 'True', 'blank': 'True'}),
            'group_max_size': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '50'}),
            'has_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hashtag': ('django.db.models.fields.CharField', [], {'default': "'Hashtag'", 'max_length': '128'}),
            'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'intended_audience': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'is_activity_clonable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['courses.Language']", 'symmetrical': 'False'}),
            'learning_goals': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'max_mass_emails_month': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '200'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'courses_as_owner'", 'to': "orm['auth.User']"}),
            'promotion_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'promotion_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'requirements': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'static_page': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['courses.StaticPage']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
            'students': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'courses_as_student'", 'blank': 'True', 'through': "orm['courses.CourseStudent']", 'to': "orm['auth.User']"}),
            'teachers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'courses_as_teacher'", 'symmetrical': 'False', 'through': "orm['courses.CourseTeacher']", 'to': "orm['auth.User']"}),
            'threshold': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
            'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'thumbnail_alt': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'courses.coursestudent': {
            'Meta': {'object_name': 'CourseStudent'},
            'course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Course']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'old_course_status': ('django.db.models.fields.CharField', [], {'default': "'f'", 'max_length': '1'}),
            'pos_lat': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'pos_lon': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'progress': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'rate': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'timestamp': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'})
        },
        'courses.courseteacher': {
            'Meta': {'ordering': "['order']", 'object_name': 'CourseTeacher'},
            'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'courses.knowledgequantum': {
            'Meta': {'ordering': "['order']", 'unique_together': "(('title', 'unit'),)", 'object_name': 'KnowledgeQuantum'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'supplementary_material': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'teacher_comments': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'unit': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Unit']"}),
            'weight': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
        },
        'courses.language': {
            'Meta': {'object_name': 'Language'},
            'abbr': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'courses.option': {
            'Meta': {'unique_together': "(('question', 'x', 'y'),)", 'object_name': 'Option'},
            'feedback': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'height': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '12'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'optiontype': ('django.db.models.fields.CharField', [], {'default': "'t'", 'max_length': '1'}),
            'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Question']"}),
            'solution': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '100'}),
            'x': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'y': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
        },
        'courses.question': {
            'Meta': {'object_name': 'Question'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']", 'unique': 'True'}),
            'last_frame': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'solution_media_content_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'solution_media_content_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
            'solution_text': ('tinymce.models.HTMLField', [], {'blank': 'True'}),
            'use_last_frame': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'courses.staticpage': {
            'Meta': {'object_name': 'StaticPage'},
            'body': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        'courses.transcription': {
            'Meta': {'object_name': 'Transcription'},
            'filename': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kq': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.KnowledgeQuantum']"}),
            'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['courses.Language']"}),
            'transcription_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'})
        },
        'courses.unit': {
            'Meta': {'ordering': "['order']", 'unique_together': "(('title', 'course'),)", 'object_name': 'Unit'},
            'course': ('adminsortable.fields.SortableForeignKey', [], {'to': "orm['courses.Course']"}),
            'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '10'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'unittype': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'weight': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
        }
    }
    complete_apps = ['courses']
 | |
| 
	#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
import binascii
from decimal import Decimal
from enum import Enum
import http.client
from io import BytesIO
import json
from struct import pack, unpack
import urllib.parse
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    assert_equal,
    assert_greater_than,
    assert_greater_than_or_equal,
    hex_str_to_bytes,
)
class ReqType(Enum):
    JSON = 1
    BIN = 2
    HEX = 3
class RetType(Enum):
    OBJ = 1
    BYTES = 2
    JSON = 3
def filter_output_indices_by_value(vouts, value):
    for vout in vouts:
        if vout['value'] == value:
            yield vout['n']
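# Illustrative note (added; not part of the upstream test): given vouts such as
# [{'n': 0, 'value': Decimal('0.1')}, {'n': 1, 'value': Decimal('49.9')}],
#     n, = filter_output_indices_by_value(vouts, Decimal('0.1'))
# unpacks to n == 0, and the single-element unpacking also asserts that exactly
# one output matched.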
class RESTTest (BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [["-rest"], []]
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def test_rest_request(self, uri, http_method='GET', req_type=ReqType.JSON,
                          body='', status=200, ret_type=RetType.JSON):
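        """Issue a request against the node's REST interface and return the response.

        (Docstring added for readability.) ``req_type`` selects the ``.json``,
        ``.bin`` or ``.hex`` suffix appended to the URI, ``status`` is asserted
        against the HTTP status code, and ``ret_type`` chooses whether the raw
        response object, its bytes, or parsed JSON (floats as Decimal) is
        returned.
        """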
        rest_uri = '/rest' + uri
        if req_type == ReqType.JSON:
            rest_uri += '.json'
        elif req_type == ReqType.BIN:
            rest_uri += '.bin'
        elif req_type == ReqType.HEX:
            rest_uri += '.hex'
        conn = http.client.HTTPConnection(self.url.hostname, self.url.port)
        self.log.debug('{} {} {}'.format(http_method, rest_uri, body))
        if http_method == 'GET':
            conn.request('GET', rest_uri)
        elif http_method == 'POST':
            conn.request('POST', rest_uri, body)
        resp = conn.getresponse()
        assert_equal(resp.status, status)
        if ret_type == RetType.OBJ:
            return resp
        elif ret_type == RetType.BYTES:
            return resp.read()
        elif ret_type == RetType.JSON:
            return json.loads(resp.read().decode('utf-8'), parse_float=Decimal)
    def run_test(self):
        self.url = urllib.parse.urlparse(self.nodes[0].url)
        self.log.info("Mine blocks and send Bitcoin Cash to node 1")
        # Random address so node1's balance doesn't increase
        not_related_address = "2MxqoHEdNQTyYeX1mHcbrrpzgojbosTpCvJ"
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generatetoaddress(100, not_related_address)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), 50)
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        self.nodes[1].generatetoaddress(1, not_related_address)
        self.sync_all()
        bb_hash = self.nodes[0].getbestblockhash()
        assert_equal(self.nodes[1].getbalance(), Decimal("0.1"))
        self.log.info("Load the transaction using the /tx URI")
        json_obj = self.test_rest_request("/tx/{}".format(txid))
        # Get the vin to later check for utxo (should be spent by then)
        spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
        # Get n of 0.1 outpoint
        n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
        spending = (txid, n)
        self.log.info("Query an unspent TXO using the /getutxos URI")
        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
        # Check chainTip response
        assert_equal(json_obj['chaintipHash'], bb_hash)
        # Make sure there is one utxo
        assert_equal(len(json_obj['utxos']), 1)
        assert_equal(json_obj['utxos'][0]['value'], Decimal('0.1'))
        self.log.info("Query a spent TXO using the /getutxos URI")
        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
        # Check chainTip response
        assert_equal(json_obj['chaintipHash'], bb_hash)
        # Make sure there is no utxo in the response because this outpoint has
        # been spent
        assert_equal(len(json_obj['utxos']), 0)
        # Check bitmap
        assert_equal(json_obj['bitmap'], "0")
        self.log.info("Query two TXOs using the /getutxos URI")
        json_obj = self.test_rest_request(
            "/getutxos/{}-{}/{}-{}".format(*(spending + spent)))
        assert_equal(len(json_obj['utxos']), 1)
        assert_equal(json_obj['bitmap'], "10")
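        # Note (added for clarity): the bitmap appears to carry one character per
        # queried outpoint in request order, so "10" means the first, still
        # unspent outpoint is set while the spent one is not.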
        self.log.info(
            "Query the TXOs using the /getutxos URI with a binary response")
        bin_request = b'\x01\x02'
        for txid, n in [spending, spent]:
            bin_request += hex_str_to_bytes(txid)
            bin_request += pack("i", n)
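        # Note (added for clarity): each queried outpoint is appended as its
        # 32-byte txid followed by a 4-byte packed ('i') output index, after the
        # fixed two-byte prefix used by this request.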
        bin_response = self.test_rest_request(
            "/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES)
        output = BytesIO(bin_response)
        chain_height, = unpack("i", output.read(4))
        response_hash = output.read(32)[::-1].hex()
        # Check that the chain tip reported by getutxos matches the best block hash
        assert_equal(bb_hash, response_hash)
        # Chain height must be 102
        assert_equal(chain_height, 102)
        self.log.info("Test the /getutxos URI with and without /checkmempool")
        # Create a transaction, check that it's found with /checkmempool, but
        # not found without. Then confirm the transaction and check that it's
        # found with or without /checkmempool.
        # Do a tx and don't sync
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        json_obj = self.test_rest_request("/tx/{}".format(txid))
        # Get the spent output to later check for utxo (should be spent by
        # then)
        spent = (json_obj['vin'][0]['txid'], json_obj['vin'][0]['vout'])
        # Get n of 0.1 outpoint
        n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
        spending = (txid, n)
        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
        assert_equal(len(json_obj['utxos']), 0)
        json_obj = self.test_rest_request(
            "/getutxos/checkmempool/{}-{}".format(*spending))
        assert_equal(len(json_obj['utxos']), 1)
        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spent))
        assert_equal(len(json_obj['utxos']), 1)
        json_obj = self.test_rest_request(
            "/getutxos/checkmempool/{}-{}".format(*spent))
        assert_equal(len(json_obj['utxos']), 0)
        self.nodes[0].generate(1)
        self.sync_all()
        json_obj = self.test_rest_request("/getutxos/{}-{}".format(*spending))
        assert_equal(len(json_obj['utxos']), 1)
        json_obj = self.test_rest_request(
            "/getutxos/checkmempool/{}-{}".format(*spending))
        assert_equal(len(json_obj['utxos']), 1)
        # Do some invalid requests
        self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.JSON,
                               body='{"checkmempool', status=400, ret_type=RetType.OBJ)
        self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN,
                               body='{"checkmempool', status=400, ret_type=RetType.OBJ)
        self.test_rest_request("/getutxos/checkmempool", http_method='POST',
                               req_type=ReqType.JSON, status=400, ret_type=RetType.OBJ)
        # Test limits
        long_uri = '/'.join(["{}-{}".format(txid, n_) for n_ in range(20)])
        self.test_rest_request("/getutxos/checkmempool/{}".format(long_uri),
                               http_method='POST', status=400, ret_type=RetType.OBJ)
        long_uri = '/'.join(['{}-{}'.format(txid, n_) for n_ in range(15)])
        self.test_rest_request(
            "/getutxos/checkmempool/{}".format(long_uri), http_method='POST', status=200)
        # Generate block to not affect upcoming tests
        self.nodes[0].generate(1)
        self.sync_all()
        self.log.info("Test the /block and /headers URIs")
        bb_hash = self.nodes[0].getbestblockhash()
        # Check binary format
        response = self.test_rest_request(
            "/block/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
        assert_greater_than(int(response.getheader('content-length')), 80)
        response_bytes = response.read()
        # Compare with block header
        response_header = self.test_rest_request(
            "/headers/1/{}".format(bb_hash), req_type=ReqType.BIN, ret_type=RetType.OBJ)
        assert_equal(int(response_header.getheader('content-length')), 80)
        response_header_bytes = response_header.read()
        assert_equal(response_bytes[0:80], response_header_bytes)
        # Check block hex format
        response_hex = self.test_rest_request(
            "/block/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
        assert_greater_than(int(response_hex.getheader('content-length')), 160)
        response_hex_bytes = response_hex.read().strip(b'\n')
        assert_equal(binascii.hexlify(response_bytes), response_hex_bytes)
        # Compare with hex block header
        response_header_hex = self.test_rest_request(
            "/headers/1/{}".format(bb_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
        assert_greater_than(
            int(response_header_hex.getheader('content-length')), 160)
        response_header_hex_bytes = response_header_hex.read(160)
        assert_equal(binascii.hexlify(
            response_bytes[:80]), response_header_hex_bytes)
        # Check json format
        block_json_obj = self.test_rest_request("/block/{}".format(bb_hash))
        assert_equal(block_json_obj['hash'], bb_hash)
        # Compare with json block header
        json_obj = self.test_rest_request("/headers/1/{}".format(bb_hash))
        # Ensure that there is one header in the json response
        assert_equal(len(json_obj), 1)
        # Request/response hash should be the same
        assert_equal(json_obj[0]['hash'], bb_hash)
        # Compare with normal RPC block response
        rpc_block_json = self.nodes[0].getblock(bb_hash)
        for key in ['hash', 'confirmations', 'height', 'version', 'merkleroot',
                    'time', 'nonce', 'bits', 'difficulty', 'chainwork', 'previousblockhash']:
            assert_equal(json_obj[0][key], rpc_block_json[key])
        # See if we can get 5 headers in one response
        self.nodes[1].generate(5)
        self.sync_all()
        json_obj = self.test_rest_request("/headers/5/{}".format(bb_hash))
        # Now we should have 5 header objects
        assert_equal(len(json_obj), 5)
        self.log.info("Test the /tx URI")
        tx_hash = block_json_obj['tx'][0]['txid']
        json_obj = self.test_rest_request("/tx/{}".format(tx_hash))
        assert_equal(json_obj['txid'], tx_hash)
        # Check hex format response
        hex_response = self.test_rest_request(
            "/tx/{}".format(tx_hash), req_type=ReqType.HEX, ret_type=RetType.OBJ)
        assert_greater_than_or_equal(
            int(hex_response.getheader('content-length')), json_obj['size'] * 2)
        self.log.info("Test tx inclusion in the /mempool and /block URIs")
        # Make 3 tx and mine them on node 1
        txs = []
        txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
        txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
        txs.append(self.nodes[0].sendtoaddress(not_related_address, 11))
        self.sync_all()
        # Check that there are exactly 3 transactions in the TX memory pool
        # before generating the block
        json_obj = self.test_rest_request("/mempool/info")
        assert_equal(json_obj['size'], 3)
        # The size of the memory pool should be greater than 3x ~100 bytes
        assert_greater_than(json_obj['bytes'], 300)
        # Check that our submitted transactions are in the TX memory pool
        json_obj = self.test_rest_request("/mempool/contents")
        for i, tx in enumerate(txs):
            assert tx in json_obj
            assert_equal(json_obj[tx]['spentby'], txs[i + 1:i + 2])
            assert_equal(json_obj[tx]['depends'], txs[i - 1:i])
        # Now mine the transactions
        newblockhash = self.nodes[1].generate(1)
        self.sync_all()
        # Check if the 3 tx show up in the new block
        json_obj = self.test_rest_request("/block/{}".format(newblockhash[0]))
        non_coinbase_txs = {tx['txid']
                            for tx in json_obj['tx'] if 'coinbase' not in tx['vin'][0]}
        assert_equal(non_coinbase_txs, set(txs))
        # Check the same but without tx details
        json_obj = self.test_rest_request(
            "/block/notxdetails/{}".format(newblockhash[0]))
        for tx in txs:
            assert tx in json_obj['tx']
        self.log.info("Test the /chaininfo URI")
        bb_hash = self.nodes[0].getbestblockhash()
        json_obj = self.test_rest_request("/chaininfo")
        assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
    RESTTest().main()
 | |
| 
	#     Copyright 2021, Kay Hayen, mailto:[email protected]
#
#     Part of "Nuitka", an optimizing Python compiler that is compatible and
#     integrates with CPython, but also works on its own.
#
#     Licensed under the Apache License, Version 2.0 (the "License");
#     you may not use this file except in compliance with the License.
#     You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#     Unless required by applicable law or agreed to in writing, software
#     distributed under the License is distributed on an "AS IS" BASIS,
#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#     See the License for the specific language governing permissions and
#     limitations under the License.
#
""" Reformulation of call expressions.
Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""
from nuitka.nodes.AssignNodes import StatementAssignmentVariable
from nuitka.nodes.CallNodes import makeExpressionCall
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import (
    makeExpressionMakeTuple,
    makeExpressionMakeTupleOrConstant,
)
from nuitka.nodes.DictionaryNodes import (
    makeExpressionMakeDictOrConstant,
    makeExpressionPairs,
)
from nuitka.nodes.FunctionNodes import (
    ExpressionFunctionCall,
    ExpressionFunctionCreation,
    ExpressionFunctionRef,
)
from nuitka.nodes.OutlineNodes import ExpressionOutlineBody
from nuitka.nodes.ReturnNodes import StatementReturn
from nuitka.nodes.VariableRefNodes import ExpressionTempVariableRef
from nuitka.PythonVersions import python_version
from .ComplexCallHelperFunctions import (
    getFunctionCallHelperDictionaryUnpacking,
    getFunctionCallHelperKeywordsStarDict,
    getFunctionCallHelperKeywordsStarList,
    getFunctionCallHelperKeywordsStarListStarDict,
    getFunctionCallHelperPosKeywordsStarDict,
    getFunctionCallHelperPosKeywordsStarList,
    getFunctionCallHelperPosKeywordsStarListStarDict,
    getFunctionCallHelperPosStarDict,
    getFunctionCallHelperPosStarList,
    getFunctionCallHelperPosStarListStarDict,
    getFunctionCallHelperStarDict,
    getFunctionCallHelperStarList,
    getFunctionCallHelperStarListStarDict,
)
from .ReformulationDictionaryCreation import buildDictionaryUnpackingArgs
from .ReformulationSequenceCreation import buildListUnpacking
from .TreeHelpers import (
    buildNode,
    buildNodeList,
    getKind,
    makeStatementsSequenceFromStatements,
)
def buildCallNode(provider, node, source_ref):
    called = buildNode(provider, node.func, source_ref)
    if python_version >= 0x350:
        list_star_arg = None
        dict_star_arg = None
    positional_args = []
    # For Python 3.5 compatibility: when the starred argument comes last, the
    # old error handling is kept; only when a starred argument appears earlier
    # does the new unpacking code apply.
    for node_arg in node.args[:-1]:
        if getKind(node_arg) == "Starred":
            assert python_version >= 0x350
            list_star_arg = buildListUnpacking(provider, node.args, source_ref)
            positional_args = []
            break
    else:
        if node.args and getKind(node.args[-1]) == "Starred":
            assert python_version >= 0x350
            list_star_arg = buildNode(provider, node.args[-1].value, source_ref)
            positional_args = buildNodeList(provider, node.args[:-1], source_ref)
        else:
            positional_args = buildNodeList(provider, node.args, source_ref)
    # Only the values of keyword pairs carry a real source reference, and those
    # are the ones that really matter here, so this is fine.
    keys = []
    values = []
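    # Note (added comment): in the ast, keyword entries with arg=None represent
    # "**" unpackings (Python 3.5+), e.g. f(**a, **b); those route through the
    # dict-unpacking helper built below.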
    for keyword in node.keywords[:-1]:
        if keyword.arg is None:
            assert python_version >= 0x350
            outline_body = ExpressionOutlineBody(
                provider=provider, name="dict_unpacking_call", source_ref=source_ref
            )
            tmp_called = outline_body.allocateTempVariable(
                temp_scope=None, name="called"
            )
            helper_args = [
                ExpressionTempVariableRef(variable=tmp_called, source_ref=source_ref),
                makeExpressionMakeTuple(
                    elements=buildDictionaryUnpackingArgs(
                        provider=provider,
                        keys=(keyword.arg for keyword in node.keywords),
                        values=(keyword.value for keyword in node.keywords),
                        source_ref=source_ref,
                    ),
                    source_ref=source_ref,
                ),
            ]
            dict_star_arg = ExpressionFunctionCall(
                function=ExpressionFunctionCreation(
                    function_ref=ExpressionFunctionRef(
                        function_body=getFunctionCallHelperDictionaryUnpacking(),
                        source_ref=source_ref,
                    ),
                    defaults=(),
                    kw_defaults=None,
                    annotations=None,
                    source_ref=source_ref,
                ),
                values=helper_args,
                source_ref=source_ref,
            )
            outline_body.setChild(
                "body",
                makeStatementsSequenceFromStatements(
                    StatementAssignmentVariable(
                        variable=tmp_called, source=called, source_ref=source_ref
                    ),
                    StatementReturn(
                        expression=_makeCallNode(
                            called=ExpressionTempVariableRef(
                                variable=tmp_called, source_ref=source_ref
                            ),
                            positional_args=positional_args,
                            keys=keys,
                            values=values,
                            list_star_arg=list_star_arg,
                            dict_star_arg=dict_star_arg,
                            source_ref=source_ref,
                        ),
                        source_ref=source_ref,
                    ),
                ),
            )
            return outline_body
    # For Python 3.5 compatibility: when the starred argument comes last, the
    # old error handling is kept; only when a starred argument appears earlier
    # does the new unpacking code apply.
    if node.keywords and node.keywords[-1].arg is None:
        assert python_version >= 0x350
        dict_star_arg = buildNode(provider, node.keywords[-1].value, source_ref)
        keywords = node.keywords[:-1]
    else:
        keywords = node.keywords
    for keyword in keywords:
        keys.append(
            makeConstantRefNode(
                constant=keyword.arg, source_ref=source_ref, user_provided=True
            )
        )
        values.append(buildNode(provider, keyword.value, source_ref))
    if python_version < 0x350:
        list_star_arg = buildNode(provider, node.starargs, source_ref, True)
        dict_star_arg = buildNode(provider, node.kwargs, source_ref, True)
    return _makeCallNode(
        called=called,
        positional_args=positional_args,
        keys=keys,
        values=values,
        list_star_arg=list_star_arg,
        dict_star_arg=dict_star_arg,
        source_ref=source_ref,
    )
def _makeCallNode(
    called, positional_args, keys, values, list_star_arg, dict_star_arg, source_ref
):
    # Many variables, but only to cover the many complex call cases.
    if list_star_arg is None and dict_star_arg is None:
        result = makeExpressionCall(
            called=called,
            args=makeExpressionMakeTupleOrConstant(
                elements=positional_args,
                user_provided=True,
                source_ref=source_ref,
            ),
            kw=makeExpressionMakeDictOrConstant(
                makeExpressionPairs(keys=keys, values=values),
                user_provided=True,
                source_ref=source_ref,
            ),
            source_ref=source_ref,
        )
        # Bug compatible line numbers before Python 3.8
        if python_version < 0x380:
            if values:
                result.setCompatibleSourceReference(
                    source_ref=values[-1].getCompatibleSourceReference()
                )
            elif positional_args:
                result.setCompatibleSourceReference(
                    source_ref=positional_args[-1].getCompatibleSourceReference()
                )
        return result
    else:
        # Dispatch to a complex helper function for each case. These helpers
        # re-formulate complex calls as described in the developer manual.
        key = (
            bool(positional_args),
            bool(keys),
            list_star_arg is not None,
            dict_star_arg is not None,
        )
        table = {
            (True, True, True, False): getFunctionCallHelperPosKeywordsStarList,
            (True, False, True, False): getFunctionCallHelperPosStarList,
            (False, True, True, False): getFunctionCallHelperKeywordsStarList,
            (False, False, True, False): getFunctionCallHelperStarList,
            (True, True, False, True): getFunctionCallHelperPosKeywordsStarDict,
            (True, False, False, True): getFunctionCallHelperPosStarDict,
            (False, True, False, True): getFunctionCallHelperKeywordsStarDict,
            (False, False, False, True): getFunctionCallHelperStarDict,
            (True, True, True, True): getFunctionCallHelperPosKeywordsStarListStarDict,
            (True, False, True, True): getFunctionCallHelperPosStarListStarDict,
            (False, True, True, True): getFunctionCallHelperKeywordsStarListStarDict,
            (False, False, True, True): getFunctionCallHelperStarListStarDict,
        }
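        # Illustrative example (comment only): a call such as f(a, *b, key=1)
        # has positional args, keyword args and a star-list arg, so its key is
        # (True, True, True, False) and getFunctionCallHelperPosKeywordsStarList
        # is selected.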
        get_helper = table[key]
        helper_args = [called]
        if positional_args:
            helper_args.append(
                makeExpressionMakeTupleOrConstant(
                    elements=positional_args,
                    user_provided=True,
                    source_ref=source_ref,
                )
            )
        # Order of evaluation changed in Python3.5.
        if python_version >= 0x350 and list_star_arg is not None:
            helper_args.append(list_star_arg)
        if keys:
            helper_args.append(
                makeExpressionMakeDictOrConstant(
                    pairs=makeExpressionPairs(keys=keys, values=values),
                    user_provided=True,
                    source_ref=source_ref,
                )
            )
        # Order of evaluation changed in Python3.5.
        if python_version < 0x350 and list_star_arg is not None:
            helper_args.append(list_star_arg)
        if dict_star_arg is not None:
            helper_args.append(dict_star_arg)
        result = ExpressionFunctionCall(
            function=ExpressionFunctionCreation(
                function_ref=ExpressionFunctionRef(
                    function_body=get_helper(), source_ref=source_ref
                ),
                defaults=(),
                kw_defaults=None,
                annotations=None,
                source_ref=source_ref,
            ),
            values=helper_args,
            source_ref=source_ref,
        )
        # Bug compatible line numbers before Python 3.8
        if python_version < 0x380:
            result.setCompatibleSourceReference(
                source_ref=helper_args[-1].getCompatibleSourceReference()
            )
        return result
 | |
| 
	#!/usr/bin/env python
"""
Code mostly adapted from: http://blog.jacobean.net/?p=1016
"""
from signal import alarm, signal, SIGALRM, SIGKILL
import sys 
import pygame
import os 
import pygameui as ui
import logging
import pywapi
import string
import time 
from time import localtime, strftime
import daemon
# location for Highland Park, NJ
weatherDotComLocationCode = 'USNJ0215'
# convert kph to mph: mph = kph / kphToMph
kphToMph = 1.60934400061
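# e.g. (illustrative values) a reported 32 kph wind becomes
# "{:.0f}".format(32 / kphToMph) == "20" mph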
# font colors
colourWhite = (255,255,255)
colourBlack = (0,0,0)
# update interval
updateRate = 60  # seconds
class pitft:
        screen = None
        colourBlack = (0,0,0)
        def __init__(self):
                class Alarm(Exception):
                        pass
                def alarm_handler(signum, frame):
                        raise Alarm
                disp_no = os.getenv("DISPLAY")
                if disp_no:
                        print "I'm running under X display = {0}".format(disp_no)
                os.putenv('SDL_FBDEV', '/dev/fb1')
                
                drivers = ['fbcon', 'directfb', 'svgalib']
                found = False
                for driver in drivers:
                        if not os.getenv('SDL_VIDEODRIVER'):
                                os.putenv('SDL_VIDEODRIVER', driver)
                        try:
                                pygame.display.init()
                        except pygame.error:
                                print 'Driver: {0} failed.'.format(driver)
                                continue
                        found = True
                        break
                if not found:
                        raise Exception('No suitable video driver found')
                signal(SIGALRM, alarm_handler)
                alarm(3)
                try:
                        pygame.init()
                        DISPLAYSURFACE = pygame.display.set_mode((320, 240))
                        alarm(0)
                except Alarm:
                        raise KeyboardInterrupt
                size = (pygame.display.Info().current_w, pygame.display.Info().current_h)
                self.screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
                #Clear the screen to start
                self.screen.fill((0,0,0))
                # Initialize font support
                pygame.font.init()
                # Render the screen
                pygame.display.update()
        def __del__(self):
                "Destructor to make sure py game shuts down, etc."
# Create an instance of the pitft class
mytft = pitft()
pygame.mouse.set_visible(False)
fontpath = pygame.font.match_font('dejavusansmono')
font = pygame.font.Font(fontpath, 20)
fontSm = pygame.font.Font(fontpath, 18)
def task_weather():
        while True:
                # retrieve data from weather.com
                weather_com_result = pywapi.get_weather_from_weather_com(weatherDotComLocationCode)
                # extract current data for today
                location = weather_com_result['location']['name']
                today = weather_com_result['forecasts'][0]['day_of_week'][0:3] + " " \
                        + weather_com_result['forecasts'][0]['date'][4:] + " " \
                        + weather_com_result['forecasts'][0]['date'][:3]
                #windSpeed = int(weather_com_result['current_conditions']['wind']['speed']) / kphToMph
                #currWind = "{:.0f}mph ".format(windSpeed) + weather_com_result['current_conditions']['wind']['text']
                currTemp = weather_com_result['current_conditions']['temperature'] + u'\N{DEGREE SIGN}' + "C"
                currPress = weather_com_result['current_conditions']['barometer']['reading'][:-3] + "mb"
                uv = "UV {}".format(weather_com_result['current_conditions']['uv']['text'])
                humid = "Hum {}%".format(weather_com_result['current_conditions']['humidity'])
                
                # extract forecast data
                forecastDays = {}
                forecaseHighs = {}
                forecaseLows = {}
                forecastPrecips = {}
                forecastWinds = {}
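                # Note (added comment): the try/except below skips today's entry
                # (index 0) when its wind speed is not numeric, presumably because
                # the day portion of that forecast is no longer available.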
                
                start = 0
                try:
                        test = float(weather_com_result['forecasts'][0]['day']['wind']['speed'])
                except ValueError:
                        start = 1
                
                for i in range(start, 5):
                
                        if not(weather_com_result['forecasts'][i]):
                                break
                        forecastDays[i] = weather_com_result['forecasts'][i]['day_of_week'][0:3]
                        forecaseHighs[i] = weather_com_result['forecasts'][i]['high'] + u'\N{DEGREE SIGN}' + "C"
                        forecaseLows[i] = weather_com_result['forecasts'][i]['low'] + u'\N{DEGREE SIGN}' + "C"
                        forecastPrecips[i] = weather_com_result['forecasts'][i]['day']['chance_precip'] + "%"
                        forecastWinds[i] = "{:.0f}".format(int(weather_com_result['forecasts'][i]['day']['wind']['speed'])  / kphToMph) + \
                                           weather_com_result['forecasts'][i]['day']['wind']['text']        
                # blank the screen
                mytft.screen.fill(colourBlack)
                
                # render the weather logo at 0,0
                icon = "./" + "%02d" % int(weather_com_result['current_conditions']['icon']) + ".png"
                logo = pygame.image.load(icon).convert()
                mytft.screen.blit(logo, (0,0))
                
                # set the anchor for the current weather data text
                textAnchorX = 140
                textAnchorY = 5
                textYoffset = 20
                
                # add current weather data text artifacts to the screen
                text_surface = font.render(location, True, (0,255,0))
                mytft.screen.blit(text_surface, (textAnchorX, textAnchorY))
                textAnchorY+=textYoffset
                x = strftime("%H:%M:%S", localtime())
                text_surface = font.render(x , True, colourWhite)
                mytft.screen.blit(text_surface, (textAnchorX, textAnchorY))
                textAnchorY+=textYoffset
                text_surface = font.render(today, True, colourWhite)
                mytft.screen.blit(text_surface, (textAnchorX, textAnchorY))
                textAnchorY+=textYoffset
                text_surface = font.render(currTemp, True, colourWhite)
                mytft.screen.blit(text_surface, (textAnchorX, textAnchorY))
                textAnchorY+=textYoffset
                
                text_surface = font.render(currPress, True, colourWhite)
                mytft.screen.blit(text_surface, (textAnchorX, textAnchorY))
                textAnchorY+=textYoffset
                # text_surface = font.render(uv, True, colourWhite)
                # mytft.screen.blit(text_surface, (textAnchorX, textAnchorY))
                # textAnchorY+=textYoffset
                text_surface = font.render(humid, True, colourWhite)
                mytft.screen.blit(text_surface, (textAnchorX, textAnchorY))
                # set X axis text anchor for the forecast text
                textAnchorX = 0
                textXoffset = 80
        
                # add each day's forecast text
                for i in forecastDays:
                        textAnchorY = 130
                        text_surface = fontSm.render(forecastDays[int(i)], True, colourWhite)
                        mytft.screen.blit(text_surface, (textAnchorX, textAnchorY))
                        textAnchorY+=textYoffset
                        text_surface = fontSm.render(forecaseHighs[int(i)], True, colourWhite)
                        mytft.screen.blit(text_surface, (textAnchorX, textAnchorY))
                        textAnchorY+=textYoffset
                        text_surface = fontSm.render(forecaseLows[int(i)], True, colourWhite)
                        mytft.screen.blit(text_surface, (textAnchorX, textAnchorY))
                        textAnchorY+=textYoffset
                        text_surface = fontSm.render(forecastPrecips[int(i)], True, colourWhite)
                        mytft.screen.blit(text_surface, (textAnchorX, textAnchorY))
                        textAnchorY+=textYoffset
                        text_surface = fontSm.render(forecastWinds[int(i)], True, colourWhite)
                        mytft.screen.blit(text_surface, (textAnchorX, textAnchorY))
                        textAnchorX+=textXoffset
                
                # refresh the screen with all the changes
                pygame.display.update()
                        
                # Wait
                time.sleep(updateRate)
if __name__ == "__main__":
	task_weather()        
 | |
| 
	# coding=utf-8
# Copyright 2021, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch BlenderbotSmall model. """
import tempfile
import unittest
from transformers import BlenderbotSmallConfig, is_torch_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch, slow, torch_device
from ..generation.test_generation_utils import GenerationTesterMixin
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
    import torch
    from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallTokenizer
    from transformers.models.blenderbot_small.modeling_blenderbot_small import (
        BlenderbotSmallDecoder,
        BlenderbotSmallEncoder,
        BlenderbotSmallForCausalLM,
    )
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class BlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(
            3,
        )
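        # clamp(3) keeps every sampled id >= 3, presumably so the random ids do
        # not collide with the special bos/pad/eos ids (0/1/2) configured in
        # get_config(). (comment added for clarity)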
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
        return BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = BlenderbotSmallModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and next attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = BlenderbotSmallModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = BlenderbotSmallEncoder.from_pretrained(tmpdirname).to(torch_device)
        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = BlenderbotSmallDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class BlenderbotSmallModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    all_model_classes = (BlenderbotSmallModel, BlenderbotSmallForConditionalGeneration) if is_torch_available() else ()
    all_generative_model_classes = (BlenderbotSmallForConditionalGeneration,) if is_torch_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = BlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = BlenderbotSmallForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
    """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
    if a is None and b is None:
        return True
    try:
        if torch.allclose(a, b, atol=atol):
            return True
        raise
    except Exception:
        pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
        if a.numel() > 100:
            msg = f"tensor values are {pct_different:.1%} percent different."
        else:
            msg = f"{a} != {b}"
        if prefix:
            msg = prefix + ": " + msg
        raise AssertionError(msg)
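# Illustrative usage (added; not part of the original suite): with the default
# atol=1e-12 the helper returns True for matching tensors and raises an
# AssertionError describing the mismatch otherwise, e.g.
#   assert_tensors_close(torch.zeros(2), torch.zeros(2))              # True
#   assert_tensors_close(torch.zeros(2), torch.ones(2), prefix="lm")  # raises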
@require_torch
class Blenderbot90MIntegrationTests(unittest.TestCase):
    ckpt = "facebook/blenderbot-90M"
    @cached_property
    def model(self):
        model = BlenderbotSmallForConditionalGeneration.from_pretrained(self.ckpt).to(torch_device)
        if torch_device == "cuda":
            model = model.half()
        return model
    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained(self.ckpt)
    @slow
    def test_90_generation_from_long_input(self):
        src_text = [
            "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like\
       i'm going to throw up.\nand why is that?"
        ]
        model_inputs = self.tokenizer(src_text, return_tensors="pt").to(torch_device)
        assert isinstance(self.tokenizer, BlenderbotSmallTokenizer)
        generated_ids = self.model.generate(**model_inputs)[0]
        reply = self.tokenizer.decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        assert reply in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
        )
    @slow
    def test_90_generation_from_short_input(self):
        model_inputs = self.tokenizer(["sam"], return_tensors="pt").to(torch_device)
        generated_utterances = self.model.generate(**model_inputs)
        clean_txt = self.tokenizer.decode(
            generated_utterances[0], skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        assert clean_txt in (
            "have you ever been to a sam club? it's a great club in the south.",
            "have you ever heard of sam harris? he's an american singer, songwriter, and actor.",
        )
class BlenderbotSmallStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        is_encoder_decoder=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.is_encoder_decoder = is_encoder_decoder
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            encoder_attention_heads=self.encoder_attention_heads,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
            is_encoder_decoder=self.is_encoder_decoder,
        )
        return (
            config,
            input_ids,
            attention_mask,
            lm_labels,
        )
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        config.use_cache = True
        model = BlenderbotSmallDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs["past_key_values"]
        # create a hypothetical next token and extend it to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append the next token to input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def create_and_check_decoder_model_attention_mask_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        model = BlenderbotSmallDecoder(config=config).to(torch_device).eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = input_ids.shape[-1] // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
        # create a hypothetical next token and extend it to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )
        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            attention_mask,
            lm_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class BlenderbotSmallStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
    all_model_classes = (BlenderbotSmallDecoder, BlenderbotSmallForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (BlenderbotSmallForCausalLM,) if is_torch_available() else ()
    test_pruning = False
    is_encoder_decoder = False
    def setUp(
        self,
    ):
        self.model_tester = BlenderbotSmallStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
    def test_decoder_model_attn_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return
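# --- Illustrative sketch (not part of the official test suite) ----------------------
# A minimal, hedged demonstration of the cache-equivalence property that
# create_and_check_decoder_model_past() verifies above: feeding only the newest token
# together with past_key_values must reproduce the last position of a full forward
# pass. Relies on the torch / BlenderbotSmall imports already made in this module;
# the tiny config values below are arbitrary.
def _demo_decoder_cache_equivalence():
    config = BlenderbotSmallConfig(
        vocab_size=99,
        d_model=16,
        decoder_layers=2,
        decoder_ffn_dim=32,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
        is_encoder_decoder=False,
    )
    decoder = BlenderbotSmallDecoder(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 7))
    next_token = torch.randint(0, config.vocab_size, (1, 1))
    with torch.no_grad():
        # Cache the first pass, then decode one extra token incrementally.
        past = decoder(input_ids, use_cache=True).past_key_values
        full = decoder(torch.cat([input_ids, next_token], dim=-1)).last_hidden_state[:, -1]
        cached = decoder(next_token, past_key_values=past).last_hidden_state[:, 0]
    assert torch.allclose(full, cached, atol=1e-3)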
 | |
| 
	# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base models for point-cloud based detection."""
from lingvo import compat as tf
from lingvo.core import base_model
from lingvo.core import py_utils
from lingvo.tasks.car import detection_3d_lib
from lingvo.tasks.car import detection_decoder
from lingvo.tasks.car import kitti_decoder
import numpy as np
class PointDetectorBase(base_model.BaseTask):
  """Base class for implementing point-based detectors.
  Subclasses should implement _BBoxesAndLogits() to compute the bounding box and
  scores given an input batch, and specify an appropriate decoder
  implementation.
  """
  @classmethod
  def Params(cls, num_classes):
    p = super().Params()
    # By default we cap the number of boxes per example to bound overall runtime,
    # but keep the cap high enough that we do not miss real boxes from complicated
    # scenes.
    p.Define('num_classes', num_classes,
             'The number of classes, including the background class.')
    p.Define(
        'max_nms_boxes', 1024,
        'Maximum number of boxes per example to emit from non-max-suppression.')
    p.Define(
        'nms_iou_threshold', 0.3,
        'NMS IoU threshold for suppressing overlapping boxes. '
        'Can either be a float or a list of len num_classes.')
    p.Define(
        'nms_score_threshold', 0.01, 'NMS threshold for scores. '
        'Can either be a float or a list of len num_classes. '
        'It is recommended that this be 1 for all non-active classes '
        'like background.')
    p.Define(
        'visualization_classification_threshold', 0.25,
        'Classification score threshold for determining if a prediction '
        'is positive for the purposes of visualizations.')
    p.Define('output_decoder', kitti_decoder.KITTIDecoder.Params(),
             'Implementation of decoder.')
    p.Define(
        'use_oriented_per_class_nms', False,
        'Whether to use oriented per class nms or single class non-oriented.')
    p.Define(
        'inference_batch_size', None,
        'If specified, hardcodes the inference batch size to this value. '
        'Useful mostly for computing the FLOPS of a model so that the shape is '
        'fully defined.')
    p.Define(
        'decode_include_residuals', False,
        'If True, includes the residuals and ground truth anchors in the '
        'decoder output dictionary. This can be helpful for downstream '
        'analysis.')
    return p
  def __init__(self, params):
    super().__init__(params)
    p = self.params
    self._utils_3d = detection_3d_lib.Utils3D()
    self.CreateChild('output_decoder', p.output_decoder)
  def CreateDecoderMetrics(self):
    """Create decoder metrics."""
    return self.output_decoder.CreateDecoderMetrics()
  def _BBoxesAndLogits(self, input_batch, predictions):
    """Fetch and return the bounding boxes and logits from an input.
    Args:
      input_batch: The input batch from which to produce boxes and logits.
      predictions: The output dictionary of ComputePredictions.
    Returns:
      A .NestedMap containing
      - predicted_bboxes: A [batch_size, num_boxes, 7] floating point Tensor.
      - classification_logits: A [batch_size, num_boxes, num_classes] floating
        point Tensor.
    """
    raise NotImplementedError('_BBoxesAndLogits method not implemented.')
  def _Placeholders(self):
    """Return a NestedMap of placeholders to fill in for inference.
    Runs the configured input pipeline to generate the expected shapes and types
    of the inputs.
    Returns:
      A NestedMap of placeholders matching the input structure of
       the inference model.
    """
    p = self.params
    with tf.Graph().as_default():
      inputs = self.params.input.Instantiate()
    # Turn those inputs into placeholders.
    placeholders = []
    for input_shape, dtype in zip(inputs.Shape().Flatten(),
                                  inputs.DType().Flatten()):
      batched_input_shape = [p.inference_batch_size] + input_shape.as_list()
      placeholders.append(tf.placeholder(dtype, batched_input_shape))
    result = inputs.DType().Pack(placeholders)
    return result
  def _BBoxDimensionErrors(self,
                           gt_bboxes,
                           pred_bboxes,
                           regression_weights,
                           epsilon=1e-6):
    """Calculates the errors per bounding box dimension for assigned anchors.
    Args:
      gt_bboxes: float Tensor of shape [..., 7] with the ground truth bounding
        box for each anchor.
      pred_bboxes: float Tensor of shape [..., 7] with the predicted bounding
        box for each anchor.
      regression_weights: float Tensor with 0/1 weights indicating whether the
        anchor had a positive assignment with same base shape as `gt_bboxes` and
        `pred_bboxes` excluding the last dimension.
      epsilon: A float epsilon for the denominator of our MaskedAverage.
    Returns:
      A metrics dict with mean bounding box errors for all positive assigned
      anchor locations.
    """
    if py_utils.GetShape(gt_bboxes)[-1] != 7:
      raise ValueError('`gt_bboxes` last dimension should be 7.')
    if py_utils.GetShape(pred_bboxes)[-1] != 7:
      raise ValueError('`pred_bboxes` last dimension should be 7.')
    batch_size = py_utils.GetShape(pred_bboxes)[0]
    # Get the leading dims for later (the -1 is to exclude the last dim).
    leading_dims = len(py_utils.GetShape(pred_bboxes)) - 1
    sum_regression_weights = tf.reduce_sum(regression_weights) + epsilon
    def _MaskedAverage(value, axis=None):
      return (tf.reduce_sum(value * regression_weights, axis=axis) /
              sum_regression_weights)
    center_error = tf.linalg.norm(
        gt_bboxes[..., :3] - pred_bboxes[..., :3], axis=-1, keepdims=True)
    mean_center_error = _MaskedAverage(center_error)
    # Dimension error as shape [3] so we can get separate height, width, length
    mean_dimension_error = _MaskedAverage(
        gt_bboxes[..., 3:6] - pred_bboxes[..., 3:6],
        axis=list(range(leading_dims)))
    # Angular error in degrees
    mean_angular_error_rad = _MaskedAverage(gt_bboxes[..., 6:] -
                                            pred_bboxes[..., 6:])
    mean_angular_error_deg = mean_angular_error_rad * (180 / np.pi)
    return {
        'error/center_distance': (mean_center_error, batch_size),
        'error/length': (mean_dimension_error[0], batch_size),
        'error/width': (mean_dimension_error[1], batch_size),
        'error/height': (mean_dimension_error[2], batch_size),
        'error/rotation_deg': (mean_angular_error_deg, batch_size),
    }
  def Inference(self):
    """Builds the inference graph.
    Default subgraph should return:
      predicted_bboxes: A [batch_size, num_boxes, 7] float Tensor.
      classification_scores: A [batch_size, num_boxes, num_classes] float
      Tensor.
    Returns:
      A dictionary whose values are a tuple of fetches and feeds.
    """
    p = self.params
    subgraphs = {}
    with tf.name_scope('inference'):
      input_placeholders = self._Placeholders()
      predictions = self.ComputePredictions(self.theta, input_placeholders)
      bboxes_and_logits = self._BBoxesAndLogits(input_placeholders, predictions)
      predicted_bboxes = bboxes_and_logits.predicted_bboxes
      classification_logits = bboxes_and_logits.classification_logits
      classification_scores = tf.sigmoid(classification_logits)
      _, per_cls_bboxes, per_cls_bbox_scores, per_cls_valid_mask = (
          detection_decoder.DecodeWithNMS(
              predicted_bboxes,
              classification_scores,
              nms_iou_threshold=p.nms_iou_threshold,
              score_threshold=p.nms_score_threshold,
              max_boxes_per_class=p.max_nms_boxes,
              use_oriented_per_class_nms=p.use_oriented_per_class_nms))
      per_cls_bbox_scores *= per_cls_valid_mask
      # TODO(vrv): Fix the inference graph for KITTI, since we need
      # to apply frustum clipping.  This requires customizing the
      # inference placeholders for each model.
      fetches = {
          'per_class_predicted_bboxes': per_cls_bboxes,
          'per_class_predicted_bbox_scores': per_cls_bbox_scores,
          'per_class_valid_mask': per_cls_valid_mask
      }
      subgraphs['default'] = fetches, dict(input_placeholders.FlattenItems())
    return subgraphs
  # TODO(bencaine): Reduce code duplication between Inference/Decode.
  def Decode(self, input_batch):
    """Decode an input batch, computing predicted bboxes from residuals."""
    p = self.params
    predictions = self.ComputePredictions(self.theta, input_batch)
    bboxes_and_logits = self._BBoxesAndLogits(input_batch, predictions)
    predicted_bboxes = bboxes_and_logits.predicted_bboxes
    batch_size, num_bboxes, _ = py_utils.GetShape(predicted_bboxes, 3)
    classification_logits = bboxes_and_logits.classification_logits
    classification_logits = py_utils.HasShape(
        classification_logits, [batch_size, num_bboxes, p.num_classes])
    classification_scores = tf.sigmoid(classification_logits)
    _, per_example_dict = self.ComputeLoss(self.theta, predictions, input_batch)
    if 'score_scaler' in per_example_dict:
      classification_scores *= per_example_dict['score_scaler']
    with tf.device('/cpu:0'):
      # Decode the predicted bboxes, performing NMS.
      per_cls_idxs, per_cls_bboxes, per_cls_bbox_scores, per_cls_valid_mask = (
          detection_decoder.DecodeWithNMS(
              predicted_bboxes,
              classification_scores,
              nms_iou_threshold=p.nms_iou_threshold,
              score_threshold=p.nms_score_threshold,
              max_boxes_per_class=p.max_nms_boxes,
              use_oriented_per_class_nms=p.use_oriented_per_class_nms))
      # per_cls_valid_mask is [batch, num_classes, num_boxes] Tensor that
      # indicates which boxes were selected by NMS. Each example will have a
      # different number of chosen bboxes, so the mask is present to allow us
      # to keep the boxes as a batched dense Tensor.
      #
      # We mask the scores with per_cls_valid_mask so that boxes not selected by
      # NMS will not be interpreted as valid.
      per_cls_bbox_scores *= per_cls_valid_mask
      visualization_weights = py_utils.HasShape(
          per_cls_bbox_scores, [batch_size, p.num_classes, p.max_nms_boxes])
      # For top down visualization, filter boxes whose scores are not above the
      # visualization threshold.
      visualization_weights = tf.where(
          tf.greater_equal(visualization_weights,
                           p.visualization_classification_threshold),
          visualization_weights, tf.zeros_like(visualization_weights))
    model_outputs = py_utils.NestedMap()
    model_outputs.per_class_predicted_bboxes = per_cls_bboxes
    model_outputs.per_class_predicted_bbox_scores = per_cls_bbox_scores
    model_outputs.per_class_valid_mask = per_cls_valid_mask
    decoder_outputs = py_utils.NestedMap({
        'per_class_predicted_bboxes': per_cls_bboxes,
        'per_class_predicted_bbox_scores': per_cls_bbox_scores,
        'per_class_valid_mask': per_cls_valid_mask,
        'visualization_weights': visualization_weights,
    })
    if p.decode_include_residuals:
      # Including the residuals in the decoder output makes it possible to save
      # the outputs for further analysis. Note that we ensure that the outputs
      # match the per-class NMS output format of [batch, num_classes, ...].
      def _ReshapeGather(tensor):
        """Reshapes tensor and then gathers using the nms indices."""
        tensor = tf.gather(
            tf.reshape(tensor, [batch_size, num_bboxes, -1]),
            per_cls_idxs,
            batch_dims=1)
        if not p.use_oriented_per_class_nms:
          # Tile so that the data fits the expected per class shape of
          # [batch_size, num_classes, ...]. When *not* using oriented NMS, the
          # num_classes dimension will be missing since the indices will not
          # have it.
          tensor = tf.tile(tensor[:, tf.newaxis, :, :],
                           [1, p.num_classes, 1, 1])
        return tensor
      decoder_outputs.update({
          'per_class_gt_residuals':
              _ReshapeGather(input_batch.anchor_localization_residuals),
          'per_class_gt_labels':
              _ReshapeGather(input_batch.assigned_gt_labels),
          'per_class_residuals':
              _ReshapeGather(predictions.residuals),
          'per_class_logits':
              _ReshapeGather(predictions.classification_logits),
          'per_class_anchor_boxes':
              _ReshapeGather(input_batch.anchor_bboxes),
      })
    decoder_outputs.update(
        self.output_decoder.ProcessOutputs(input_batch, model_outputs))
    # Produce global step as an output (which is the step
    # of the checkpoint being decoded.)
    decoder_outputs.global_step = py_utils.GetGlobalStep()
    return decoder_outputs
  def PostProcessDecodeOut(self, dec_out_dict, dec_metrics_dict):
    return self.output_decoder.PostProcessDecodeOut(dec_out_dict,
                                                    dec_metrics_dict)
  def DecodeFinalize(self, decode_finalize_args):
    decode_out_path = decode_finalize_args.decode_out_path
    decode_out = decode_finalize_args.decode_out
    if not decode_out:
      return
    # Write out a tf record file for all values in decode_out.
    with tf.io.TFRecordWriter(decode_out_path) as f:
      for _, v in decode_out:
        f.write(v)
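# --- Illustrative sketch (framework-free) -------------------------------------------
# A minimal NumPy rendition of the masked averaging performed in _BBoxDimensionErrors():
# only anchors with a positive regression weight contribute to the mean center,
# dimension and rotation errors. The boxes and weights below are made-up placeholders.
def _demo_bbox_dimension_errors():
  gt = np.array([[0., 0., 0., 4., 2., 1.5, 0.0],
                 [5., 5., 0., 4., 2., 1.5, np.pi / 2]])
  pred = np.array([[0.5, 0., 0., 3.8, 2.1, 1.5, 0.1],
                   [9., 9., 9., 1., 1., 1., 0.0]])
  weights = np.array([1., 0.])  # only the first anchor was positively assigned
  denom = weights.sum() + 1e-6
  center_err = np.linalg.norm(gt[:, :3] - pred[:, :3], axis=-1)
  mean_center_err = (center_err * weights).sum() / denom
  mean_dim_err = ((gt[:, 3:6] - pred[:, 3:6]) * weights[:, None]).sum(axis=0) / denom
  mean_rot_deg = ((gt[:, 6] - pred[:, 6]) * weights).sum() / denom * 180. / np.pi
  return mean_center_err, mean_dim_err, mean_rot_deg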
 | |
| 
	""".. Ignore pydocstyle D400.
============================
Documentation from Processes
============================
Sphinx extension for semi-automatic documentation of Resolwe processes.
This module introduces a new directive that enables user to document a
set of processes. The directive can be used in three variations::
    .. autoprocess::
    .. autoprocess:: category_name
    .. autoprocess:: process_slug
The first option documents *all* processes.
The second option documents just the ones that have *category* equal to
``category_name``. This means that subcategories (e.g. ``analyses:alignment``,
``analyses:variants``) of a category (e.g. ``analyses``) will not be documented.
The third option documents only one process: the one with *slug* equal to
``process_slug``.
"""
import fnmatch
import os
import re
from itertools import groupby
from operator import itemgetter
import yaml
from docutils import nodes
from docutils.parsers.rst import Directive
from sphinx import addnodes
from resolwe.flow.utils import iterate_schema
PROCESS_CACHE = None
def get_process_definition_start(fname, slug):
    """Find the first line of process definition.
    The first line of a process definition is the line containing its slug.
    :param str fname: Path to filename with processes
    :param string slug: process slug
    :return: line where the process definition starts
    :rtype: int
    """
    with open(fname) as file_:
        for i, line in enumerate(file_):
            if re.search(r'slug:\s*{}'.format(slug), line):
                return i + 1
    # In case the starting line is not found, just return the first line
    return 1
def get_processes(process_dir, base_source_uri):
    """Find processes in path.
    :param str process_dir: Path to the directory where to search for processes
    :param str base_source_uri: Base URL of the source code repository with process definitions
    :return: List of process definitions parsed from the YAML files; each
        definition is annotated with a ``source_uri`` key pointing to its source code
    :rtype: list
    :raises: ValueError: if multiple processes with the same slug are found
    """
    global PROCESS_CACHE  # pylint: disable=global-statement
    if PROCESS_CACHE is not None:
        return PROCESS_CACHE
    all_process_files = []
    process_file_extensions = ['*.yaml', '*.yml']
    for root, _, filenames in os.walk(process_dir):
        for extension in process_file_extensions:
            for filename in fnmatch.filter(filenames, extension):
                all_process_files.append(os.path.join(root, filename))
    def read_yaml_file(fname):
        """Read the yaml file."""
        with open(fname) as f:
            return yaml.safe_load(f)
    processes = []
    for process_file in all_process_files:
        processes_in_file = read_yaml_file(process_file)
        for process in processes_in_file:
            # This section finds the line in the file where the
            # definition of the process starts (there are
            # multiple process definitions in some files).
            startline = get_process_definition_start(process_file, process['slug'])
            # Put together URL to starting line of process definition.
            process['source_uri'] = base_source_uri + process_file[len(process_dir) + 1:] + '#L' + str(startline)
            if 'category' not in process:
                process['category'] = 'uncategorized'
            processes.append(process)
    PROCESS_CACHE = processes
    return processes
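# Illustrative sketch: the shape of one entry in the list returned by get_processes().
# The keys shown are the ones consumed by the directives below; all values are
# placeholders, not a real process definition.
#
#     {
#         'slug': 'example-process',
#         'name': 'Example process',
#         'type': 'data:example:',
#         'version': '1.0.0',
#         'category': 'analyses',
#         'description': 'Short description rendered under the process header.',
#         'input': [...],   # schema fields rendered by make_properties_list()
#         'output': [...],
#         'source_uri': 'https://example.org/processes/example.yml#L12',
#     }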
class AutoProcessDirective(Directive):
    """Automatically document Resolwe processes."""
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = None
    def make_field(self, field_name, field_body):
        """Fill content into nodes.
        :param string field_name: Field name of the field
        :param field_body: Field body of the field
        :type field_body: str or instance of docutils.nodes
        :return: field instance filled with given name and body
        :rtype: nodes.field
        """
        name = nodes.field_name()
        name += nodes.Text(field_name)
        paragraph = nodes.paragraph()
        if isinstance(field_body, str):
            # This is the case when field_body is just a string:
            paragraph += nodes.Text(field_body)
        else:
            # This is the case when field_body is a complex node:
            # useful when constructing nested field lists
            paragraph += field_body
        body = nodes.field_body()
        body += paragraph
        field = nodes.field()
        field.extend([name, body])
        return field
    def make_properties_list(self, field):
        """Fill the ``field`` into a properties list and return it.
        :param dict field: the content of the property list to make
        :return: field_list instance filled with given field
        :rtype: nodes.field_list
        """
        properties_list = nodes.field_list()
        # changing the order of elements in this list affects
        # the order in which they are displayed
        property_names = ['label', 'type', 'description', 'required',
                          'disabled', 'hidden', 'default', 'placeholder',
                          'validate_regex', 'choices', 'collapse', 'group']
        for name in property_names:
            if name not in field:
                continue
            value = field[name]
            # Value should be formatted in code-style (=literal) mode
            if name in ['type', 'default', 'placeholder', 'validate_regex']:
                literal_node = nodes.literal(str(value), str(value))
                properties_list += self.make_field(name, literal_node)
            # Special formatting of ``value`` is needed if name == 'choices'
            elif name == 'choices':
                bullet_list = nodes.bullet_list()
                for choice in value:
                    label = nodes.Text(choice['label'] + ': ')
                    val = nodes.literal(choice['value'], choice['value'])
                    paragraph = nodes.paragraph()
                    paragraph += label
                    paragraph += val
                    list_item = nodes.list_item()
                    list_item += paragraph
                    bullet_list += list_item
                properties_list += self.make_field(name, bullet_list)
            else:
                properties_list += self.make_field(name, str(value))
        return properties_list
    def make_process_header(self, slug, typ, version, source_uri, description, inputs):
        """Generate a process definition header.
        :param str slug: process' slug
        :param str typ: process' type
        :param str version:  process' version
        :param str source_uri: url to the process definition
        :param str description: process' description
        :param dict inputs: process' inputs
        """
        node = addnodes.desc()
        signode = addnodes.desc_signature(slug, '')
        node.append(signode)
        node['objtype'] = node['desctype'] = typ
        signode += addnodes.desc_annotation(typ, typ, classes=['process-type'])
        signode += addnodes.desc_addname('', '')
        signode += addnodes.desc_name(slug + ' ', slug + ' ')
        paramlist = addnodes.desc_parameterlist()
        for field_schema, _, _ in iterate_schema({}, inputs, ''):
            field_type = field_schema['type']
            field_name = field_schema['name']
            field_default = field_schema.get('default', None)
            field_default = '' if field_default is None else '={}'.format(field_default)
            param = addnodes.desc_parameter('', '', noemph=True)
            param += nodes.emphasis(field_type, field_type, classes=['process-type'])
            # separate by non-breaking space in the output
            param += nodes.strong(text='\xa0\xa0' + field_name)
            paramlist += param
        signode += paramlist
        signode += nodes.reference('', nodes.Text('[Source: v{}]'.format(version)),
                                   refuri=source_uri, classes=['viewcode-link'])
        desc = nodes.paragraph()
        desc += nodes.Text(description, description)
        return [node, desc]
    def make_process_node(self, process):
        """Fill the content of process definiton node.
        :param dict process: process data as given from yaml.load function
        :return: process node
        """
        name = process['name']
        slug = process['slug']
        typ = process['type']
        version = process['version']
        description = process.get('description', '')
        source_uri = process['source_uri']
        inputs = process.get('input', [])
        outputs = process.get('output', [])
        # Make process name a section title:
        section = nodes.section(ids=['process-' + slug])
        section += nodes.title(name, name)
        # Make process header:
        section += self.make_process_header(slug, typ, version, source_uri, description, inputs)
        # Make inputs section:
        container_node = nodes.container(classes=['toggle'])
        container_header = nodes.paragraph(classes=['header'])
        container_header += nodes.strong(text='Input arguments')
        container_node += container_header
        container_body = nodes.container()
        for field_schema, _, path in iterate_schema({}, inputs, ''):
            container_body += nodes.strong(text=path)
            container_body += self.make_properties_list(field_schema)
        container_node += container_body
        section += container_node
        # Make outputs section:
        container_node = nodes.container(classes=['toggle'])
        container_header = nodes.paragraph(classes=['header'])
        container_header += nodes.strong(text='Output results')
        container_node += container_header
        container_body = nodes.container()
        for field_schema, _, path in iterate_schema({}, outputs, ''):
            container_body += nodes.strong(text=path)
            container_body += self.make_properties_list(field_schema)
        container_node += container_body
        section += container_node
        return [section, addnodes.index(entries=[('single', name, 'process-' + slug, '', None)])]
    def run(self):
        """Create a list of process definitions."""
        config = self.state.document.settings.env.config
        # Get all processes:
        processes = get_processes(config.autoprocess_process_dir, config.autoprocess_source_base_url)
        process_nodes = []
        for process in sorted(processes, key=itemgetter('name')):
            process_nodes.extend(self.make_process_node(process))
        return process_nodes
class AutoProcessCategoryDirective(Directive):
    """Automatically document process categories."""
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = None
    def run(self):
        """Create a category tree."""
        config = self.state.document.settings.env.config
        # Group processes by category
        processes = get_processes(config.autoprocess_process_dir, config.autoprocess_source_base_url)
        processes.sort(key=itemgetter('category'))
        categorized_processes = {k: list(g) for k, g in groupby(processes, itemgetter('category'))}
        # Build category tree
        category_sections = {'': nodes.container(ids=['categories'])}
        top_categories = []
        for category in sorted(categorized_processes.keys()):
            category_path = ''
            for category_node in category.split(':'):
                parent_category_path = category_path
                category_path += '{}:'.format(category_node)
                if category_path in category_sections:
                    continue
                category_name = category_node.capitalize()
                section = nodes.section(ids=['category-' + category_node])
                section += nodes.title(category_name, category_name)
                # Add process list
                category_key = category_path[:-1]
                if category_key in categorized_processes:
                    listnode = nodes.bullet_list()
                    section += listnode
                    for process in categorized_processes[category_key]:
                        par = nodes.paragraph()
                        node = nodes.reference('', process['name'], internal=True)
                        node['refuri'] = config.autoprocess_definitions_uri + '#process-' + process['slug']
                        node['reftitle'] = process['name']
                        par += node
                        listnode += nodes.list_item('', par)
                category_sections[parent_category_path] += section
                category_sections[category_path] = section
                if parent_category_path == '':
                    top_categories.append(section)
        # Return top sections only
        return top_categories
class AutoProcessTypesDirective(Directive):
    """Automatically document process types."""
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = None
    def run(self):
        """Create a type list."""
        config = self.state.document.settings.env.config
        # Group processes by category
        processes = get_processes(config.autoprocess_process_dir, config.autoprocess_source_base_url)
        processes.sort(key=itemgetter('type'))
        processes_by_types = {k: list(g) for k, g in groupby(processes, itemgetter('type'))}
        listnode = nodes.bullet_list()
        for typ in sorted(processes_by_types.keys()):
            par = nodes.paragraph()
            par += nodes.literal(typ, typ)
            par += nodes.Text(' - ')
            processes = sorted(processes_by_types[typ], key=itemgetter('name'))
            last_process = processes[-1]
            for process in processes:
                node = nodes.reference('', process['name'], internal=True)
                node['refuri'] = config.autoprocess_definitions_uri + '#process-' + process['slug']
                node['reftitle'] = process['name']
                par += node
                if process != last_process:
                    par += nodes.Text(', ')
            listnode += nodes.list_item('', par)
        return [listnode]
def setup(app):
    """Register directives.
    When Sphinx loads the extension (= imports the extension module) it
    also executes the setup() function. Setup is the way the extension
    informs Sphinx about everything that the extension enables: which
    config values are introduced, which custom nodes/directives/roles
    and which events are defined in the extension.
    In this case, three new directives are registered. All used nodes are
    constructed from already existing nodes in the docutils.nodes package.
    """
    app.add_config_value('autoprocess_process_dir', '', 'env')
    app.add_config_value('autoprocess_source_base_url', '', 'env')
    app.add_config_value('autoprocess_definitions_uri', '', 'env')
    app.add_directive('autoprocess', AutoProcessDirective)
    app.add_directive('autoprocesscategory', AutoProcessCategoryDirective)
    app.add_directive('autoprocesstype', AutoProcessTypesDirective)
    # The setup() function can return a dictionary. This is treated by
    # Sphinx as metadata of the extension:
    return {'version': '0.2'}
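# Illustrative configuration sketch: a project's conf.py would enable this extension and
# set the three config values registered in setup() above. The module path, directories
# and URLs below are placeholders, not values prescribed by this extension.
#
#     extensions = [
#         'docs.autoprocess_extension',  # hypothetical import path of this module
#     ]
#     autoprocess_process_dir = '/path/to/process/definitions'
#     autoprocess_source_base_url = 'https://example.org/repo/blob/master/processes/'
#     autoprocess_definitions_uri = 'process_definitions.html'
#
# Documents can then use the registered directives::
#
#     .. autoprocess::
#     .. autoprocesscategory::
#     .. autoprocesstype::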
 | |
| 
	import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_true
from sklearn import linear_model, datasets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
    """
    The principle of LARS is to keep covariances tied and decreasing
    """
    alphas_, active, coef_path_ = linear_model.lars_path(
        diabetes.data, diabetes.target, method="lar")
    for (i, coef_) in enumerate(coef_path_.T):
        res = y - np.dot(X, coef_)
        cov = np.dot(X.T, res)
        C = np.max(abs(cov))
        eps = 1e-3
        ocur = len(cov[C - eps < abs(cov)])
        if i < X.shape[1]:
            assert_true(ocur == i + 1)
        else:
            # no more than max_pred variables can go into the active set
            assert_true(ocur == X.shape[1])
def test_simple_precomputed():
    """
    The same, with precomputed Gram matrix
    """
    G = np.dot(diabetes.data.T, diabetes.data)
    alphas_, active, coef_path_ = linear_model.lars_path(
        diabetes.data, diabetes.target, Gram=G, method="lar")
    for i, coef_ in enumerate(coef_path_.T):
        res = y - np.dot(X, coef_)
        cov = np.dot(X.T, res)
        C = np.max(abs(cov))
        eps = 1e-3
        ocur = len(cov[C - eps < abs(cov)])
        if i < X.shape[1]:
            assert_true(ocur == i + 1)
        else:
            # no more than max_pred variables can go into the active set
            assert_true(ocur == X.shape[1])
def test_lars_lstsq():
    """
    Test that Lars gives the least squares solution at the end
    of the path
    """
    X1 = 3 * diabetes.data  # use un-normalized dataset
    clf = linear_model.LassoLars(alpha=0.)
    clf.fit(X1, y)
    coef_lstsq = np.linalg.lstsq(X1, y)[0]
    assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
    """
    Test that Lars Lasso gives the least squares solution at the end
    of the path
    """
    alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
    coef_lstsq = np.linalg.lstsq(X, y)[0]
    assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
    """Check that lars_path is robust to collinearity in input"""
    X = np.array([[3., 3., 1.],
                  [2., 2., 0.],
                  [1., 1., 0]])
    y = np.array([1., 0., 0])
    _, _, coef_path_ = linear_model.lars_path(X, y, alpha_min=0.01)
    assert_true(not np.isnan(coef_path_).any())
    residual = np.dot(X, coef_path_[:, -1]) - y
    assert_true((residual ** 2).sum() < 1.)  # just make sure it's bounded
def test_singular_matrix():
    """
    Test when input is a singular matrix
    """
    X1 = np.array([[1, 1.], [1., 1.]])
    y1 = np.array([1, 1])
    alphas, active, coef_path = linear_model.lars_path(X1, y1)
    assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0], [1, 0]])
def test_lasso_lars_vs_lasso_cd(verbose=False):
    """
    Test that LassoLars and Lasso using coordinate descent give the
    same results
    """
    X = 3 * diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = np.linalg.norm(c - lasso_cd.coef_)
        assert_true(error < 0.01)
    # similar test, with the classifiers
    for alpha in np.linspace(1e-2, 1 - 1e-2):
        clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
        clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
                                  normalize=False).fit(X, y)
        err = np.linalg.norm(clf1.coef_ - clf2.coef_)
        assert_true(err < 1e-3)
    # same test, with normalized data
    X = diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
    lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
                                  tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = np.linalg.norm(c - lasso_cd.coef_)
        assert_true(error < 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
    """
    Test that LassoLars and Lasso using coordinate descent give the
    same results when early stopping is used.
    (test: before, in the middle, and in the last part of the path)
    """
    alphas_min = [10, 0.9, 1e-4]
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = np.linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_true(error < 0.01)
    alphas_min = [10, 0.9, 1e-4]
    # same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
                                      tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = np.linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_true(error < 0.01)
def test_lars_add_features():
    """
    Ensure that at least some features get added if necessary.
    Test for commit 6d2b4c.
    """
    # Hilbert matrix
    n = 5
    H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
    clf = linear_model.Lars(fit_intercept=False).fit(
        H, np.arange(n))
    assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
    lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
    lars.fit(X, y)
    assert_true(len(lars.coef_.nonzero()[0]) == 6)
def test_lars_cv():
    """ Test the LassoLarsCV object by checking that the optimal alpha
        increases as the number of samples increases.
        This property is not actually guaranteed in general and is just a
        property of the given dataset, with the given steps chosen.
    """
    old_alpha = 0
    lars_cv = linear_model.LassoLarsCV()
    for length in (400, 200, 100):
        X = diabetes.data[:length]
        y = diabetes.target[:length]
        lars_cv.fit(X, y)
        np.testing.assert_array_less(old_alpha, lars_cv.alpha)
        old_alpha = lars_cv.alpha
def test_lasso_lars_ic():
    """ Test the LassoLarsIC object by checking that
        - some good features are selected.
        - alpha_bic > alpha_aic
        - n_nonzero_bic < n_nonzero_aic
    """
    lars_bic = linear_model.LassoLarsIC('bic')
    lars_aic = linear_model.LassoLarsIC('aic')
    rng = np.random.RandomState(42)
    X = diabetes.data
    y = diabetes.target
    X = np.c_[X, rng.randn(X.shape[0], 4)]  # add 4 bad features
    lars_bic.fit(X, y)
    lars_aic.fit(X, y)
    nonzero_bic = np.where(lars_bic.coef_)[0]
    nonzero_aic = np.where(lars_aic.coef_)[0]
    assert_true(lars_bic.alpha_ > lars_aic.alpha_)
    assert_true(len(nonzero_bic) < len(nonzero_aic))
    assert_true(np.max(nonzero_bic) < diabetes.data.shape[1])
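# Illustrative sketch (not collected by nose): a compact, hedged check of the LARS
# property stated in test_simple() above -- along the path, the maximum absolute
# covariance between the features and the residual can only decrease. Reuses the
# diabetes X, y loaded at the top of this module.
def _demo_lars_decreasing_covariance():
    _, _, coef_path = linear_model.lars_path(X, y, method="lar")
    max_cov = [np.max(np.abs(np.dot(X.T, y - np.dot(X, c)))) for c in coef_path.T]
    # Each step's maximum covariance is no larger than the previous one.
    assert all(later <= earlier + 1e-8 for earlier, later in zip(max_cov, max_cov[1:]))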
if __name__ == '__main__':
    import nose
    nose.runmodule()
 | |
| 
	# Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test google-cloud-pipeline-Components to ensure the compile without error."""
import json
import os
import unittest
import kfp
from kfp.v2 import compiler
from google.cloud import aiplatform
from google_cloud_pipeline_components.v1.dataset import (
    ImageDatasetCreateOp,
    TabularDatasetCreateOp,
    ImageDatasetExportDataOp,
    ImageDatasetImportDataOp,
    TabularDatasetExportDataOp,
    TextDatasetCreateOp,
    TextDatasetExportDataOp,
    TextDatasetImportDataOp,
    VideoDatasetCreateOp,
    VideoDatasetExportDataOp,
    VideoDatasetImportDataOp,
    TimeSeriesDatasetCreateOp,
    TimeSeriesDatasetExportDataOp,
)
class ComponentsCompileTest(unittest.TestCase):
  def setUp(self):
    super(ComponentsCompileTest, self).setUp()
    self._project = "test_project"
    self._location = "us-central1"
    self._display_name = "test_display_name"
    self._model_display_name = "test_model_display_name"
    self._gcs_source = "gs://test_gcs_source"
    self._gcs_output_dir = "gs://test_gcs_output_dir"
    self._pipeline_root = "gs://test_pipeline_root"
    self._gcs_destination_prefix = "gs://test_gcs_output_dir/batch_prediction"
    self._serving_container_image_uri = "gcr.io/test_project/test_image:test_tag"
    self._artifact_uri = "project/test_artifact_uri"
    self._package_path = os.path.join(
        os.getenv("TEST_UNDECLARED_OUTPUTS_DIR"), "pipeline.json")
  def tearDown(self):
    if os.path.exists(self._package_path):
      os.remove(self._package_path)
  def test_image_dataset_component_compile(self):
    @kfp.dsl.pipeline(name="training-test")
    def pipeline():
      dataset_create_op = ImageDatasetCreateOp(
          project=self._project,
          display_name=self._display_name,
          gcs_source=self._gcs_source,
          import_schema_uri=aiplatform.schema.dataset.ioformat.image
          .single_label_classification,
      )
      dataset_export_op = ImageDatasetExportDataOp(
          project=self._project,
          dataset=dataset_create_op.outputs["dataset"],
          output_dir=self._gcs_output_dir,
      )
      dataset_import_op = ImageDatasetImportDataOp(
          project=self._project,
          gcs_source=self._gcs_source,
          dataset=dataset_create_op.outputs["dataset"],
          import_schema_uri=aiplatform.schema.dataset.ioformat.image
          .single_label_classification)
    compiler.Compiler().compile(
        pipeline_func=pipeline, package_path=self._package_path)
    with open(self._package_path) as f:
      executor_output_json = json.load(f, strict=False)
    with open("testdata/image_dataset_pipeline.json") as ef:
      expected_executor_output_json = json.load(ef, strict=False)
    # Ignore the kfp SDK version during comparison
    del executor_output_json["sdkVersion"]
    self.assertEqual(executor_output_json, expected_executor_output_json)
  def test_tabular_dataset_component_compile(self):
    @kfp.dsl.pipeline(name="training-test")
    def pipeline():
      dataset_create_op = TabularDatasetCreateOp(
          project=self._project,
          display_name=self._display_name,
          gcs_source=self._gcs_source,
      )
      dataset_export_op = TabularDatasetExportDataOp(
          project=self._project,
          dataset=dataset_create_op.outputs["dataset"],
          output_dir=self._gcs_output_dir,
      )
    compiler.Compiler().compile(
        pipeline_func=pipeline, package_path=self._package_path)
    with open(self._package_path) as f:
      executor_output_json = json.load(f, strict=False)
    with open("testdata/tabular_dataset_pipeline.json") as ef:
      expected_executor_output_json = json.load(ef, strict=False)
    # Ignore the kfp SDK version during comparison
    del executor_output_json["sdkVersion"]
    self.assertEqual(executor_output_json, expected_executor_output_json)
  def test_text_dataset_component_compile(self):
    @kfp.dsl.pipeline(name="training-test")
    def pipeline():
      dataset_create_op = TextDatasetCreateOp(
          project=self._project,
          display_name=self._display_name,
          gcs_source=self._gcs_source,
          import_schema_uri=aiplatform.schema.dataset.ioformat.text
          .multi_label_classification,
      )
      dataset_export_op = TextDatasetExportDataOp(
          project=self._project,
          dataset=dataset_create_op.outputs["dataset"],
          output_dir=self._gcs_output_dir,
      )
      dataset_import_op = TextDatasetImportDataOp(
          project=self._project,
          gcs_source=self._gcs_source,
          dataset=dataset_create_op.outputs["dataset"],
          import_schema_uri=aiplatform.schema.dataset.ioformat.text
          .multi_label_classification)
    compiler.Compiler().compile(
        pipeline_func=pipeline, package_path=self._package_path)
    with open(self._package_path) as f:
      executor_output_json = json.load(f, strict=False)
    with open("testdata/text_dataset_pipeline.json") as ef:
      expected_executor_output_json = json.load(ef, strict=False)
    # Ignore the kfp SDK version during comparison
    del executor_output_json["sdkVersion"]
    self.assertEqual(executor_output_json, expected_executor_output_json)
  def test_video_dataset_component_compile(self):
    @kfp.dsl.pipeline(name="training-test")
    def pipeline():
      dataset_create_op = VideoDatasetCreateOp(
          project=self._project,
          display_name=self._display_name,
          gcs_source=self._gcs_source,
          import_schema_uri=aiplatform.schema.dataset.ioformat.video
          .classification,
      )
      dataset_export_op = VideoDatasetExportDataOp(
          project=self._project,
          dataset=dataset_create_op.outputs["dataset"],
          output_dir=self._gcs_output_dir,
      )
      dataset_import_op = VideoDatasetImportDataOp(
          project=self._project,
          gcs_source=self._gcs_source,
          dataset=dataset_create_op.outputs["dataset"],
          import_schema_uri=aiplatform.schema.dataset.ioformat.video
          .classification)
    compiler.Compiler().compile(
        pipeline_func=pipeline, package_path=self._package_path)
    with open(self._package_path) as f:
      executor_output_json = json.load(f, strict=False)
    with open("testdata/video_dataset_pipeline.json") as ef:
      expected_executor_output_json = json.load(ef, strict=False)
    # Ignore the kfp SDK version during comparison
    del executor_output_json["sdkVersion"]
    self.assertEqual(executor_output_json, expected_executor_output_json)
  def test_time_series_dataset_component_compile(self):
    @kfp.dsl.pipeline(name="training-test")
    def pipeline():
      dataset_create_op = TimeSeriesDatasetCreateOp(
          project=self._project,
          display_name=self._display_name,
          gcs_source=self._gcs_source,
      )
      dataset_export_op = TimeSeriesDatasetExportDataOp(
          project=self._project,
          dataset=dataset_create_op.outputs["dataset"],
          output_dir=self._gcs_output_dir,
      )
    compiler.Compiler().compile(
        pipeline_func=pipeline, package_path=self._package_path)
    with open(self._package_path) as f:
      executor_output_json = json.load(f, strict=False)
    with open("testdata/time_series_dataset_pipeline.json") as ef:
      expected_executor_output_json = json.load(ef, strict=False)
    # Ignore the kfp SDK version during comparison
    del executor_output_json["sdkVersion"]
    self.assertEqual(executor_output_json, expected_executor_output_json)
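# --- Illustrative sketch (not an official test) ---------------------------------------
# The pattern every test above follows, reduced to its core: define a @kfp.dsl.pipeline
# that wires up a dataset component and compile it to a JSON pipeline spec. The project,
# display name, GCS source and output path below are placeholders.
def _demo_compile_minimal_pipeline(package_path="/tmp/minimal_dataset_pipeline.json"):

  @kfp.dsl.pipeline(name="minimal-dataset-test")
  def pipeline():
    TabularDatasetCreateOp(
        project="placeholder-project",
        display_name="placeholder-display-name",
        gcs_source="gs://placeholder-bucket/data.csv",
    )

  compiler.Compiler().compile(
      pipeline_func=pipeline, package_path=package_path)
  return package_path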
 | |
| 
	import _thread as thread
import os
import platform
import tempfile
import time
from logging import getLogger
from subprocess import Popen
from pulsar.managers import status
from pulsar.managers.base.directory import DirectoryBaseManager
from .util import kill_pid
log = getLogger(__name__)
JOB_FILE_SUBMITTED = "submitted"
JOB_FILE_PID = "pid"
try:
    from galaxy.util.commands import new_clean_env
except ImportError:
    # We can drop this once we require galaxy-util >=21.01
    def new_clean_env():
        """
        Returns a minimal environment to use when invoking a subprocess
        """
        env = {}
        for k in ("HOME", "PATH", "TMPDIR"):
            if k in os.environ:
                env[k] = os.environ[k]
        if "TMPDIR" not in env:
            env["TMPDIR"] = os.path.abspath(tempfile.gettempdir())
        # Set LC_CTYPE environment variable to enforce UTF-8 file encoding.
        # This is needed e.g. for Python < 3.7 where
        # `locale.getpreferredencoding()` (also used by open() to determine the
        # default file encoding) would return `ANSI_X3.4-1968` without this.
        env["LC_CTYPE"] = "C.UTF-8"
        return env
class BaseUnqueuedManager(DirectoryBaseManager):
    def _record_submission(self, job_id):
        self._job_directory(job_id).store_metadata(JOB_FILE_SUBMITTED, 'true')
    def _get_status(self, job_id):
        job_directory = self._job_directory(job_id)
        if self._was_cancelled(job_id):
            job_status = status.CANCELLED
        elif job_directory.has_metadata(JOB_FILE_PID):
            job_status = status.RUNNING
        elif job_directory.has_metadata(JOB_FILE_SUBMITTED):
            job_status = status.QUEUED
        else:
            job_status = status.COMPLETE
        return job_status
    def _finish_execution(self, job_id):
        self._job_directory(job_id).remove_metadata(JOB_FILE_SUBMITTED)
    def _prepare_run(self, job_id, command_line, dependencies_description, env, setup_params=None):
        self._check_execution_with_tool_file(job_id, command_line)
        self._record_submission(job_id)
        if platform.system().lower() == "windows":
            # TODO: Don't ignore requirements and env without warning. Ideally
            # process them or at least warn about them being ignored.
            command_line = self._expand_command_line(command_line, dependencies_description, job_directory=self.job_directory(job_id).job_directory)
        else:
            command_line = self._setup_job_file(
                job_id,
                command_line,
                dependencies_description=dependencies_description,
                env=env,
                setup_params=setup_params
            )
        return command_line
    def _start_monitor(self, *args, **kwd):
        if kwd.get("background", True):
            thread.start_new_thread(self._monitor_execution, args)
        else:
            self._monitor_execution(*args)
# Job Locks (for status updates). Following methods are locked.
#    _finish_execution(self, job_id)
#    _get_status(self, job_id)
#    _is_cancelled(self, job_id)
#    _record_pid(self, job_id, pid)
#    _get_pid_for_killing_or_cancel(self, job_id)
#
class Manager(BaseUnqueuedManager):
    """
    A simple job manager that just directly runs jobs as given (no
    queueing). Preserved for compatibility with older versions of Pulsar
    client code where Galaxy is used to maintain the queue (like Galaxy's
    local job runner).
    """
    manager_type = "unqueued"
    def __init__(self, name, app, **kwds):
        super().__init__(name, app, **kwds)
    def __get_pid(self, job_id):
        pid = None
        try:
            pid = self._job_directory(job_id).load_metadata(JOB_FILE_PID)
            if pid is not None:
                pid = int(pid)
        except Exception:
            pass
        return pid
    def _get_job_lock(self, job_id):
        return self._job_directory(job_id).lock()
    def get_status(self, job_id):
        with self._get_job_lock(job_id):
            return self._get_status(job_id)
    def kill(self, job_id):
        log.info("Attempting to kill job with job_id %s" % job_id)
        job_lock = self._get_job_lock(job_id)
        with job_lock:
            pid = self._get_pid_for_killing_or_cancel(job_id)
        if pid:
            log.info("Attempting to kill pid %s" % pid)
            kill_pid(pid)
    def _monitor_execution(self, job_id, proc, stdout, stderr):
        try:
            proc.wait()
            stdout.close()
            stderr.close()
            return_code = proc.returncode
            # job_script might have set a return code, so use that if set; otherwise use this one.
            # Should there be some way to signal failure if this is non-0 in that case?
            self._write_return_code_if_unset(job_id, str(return_code))
        finally:
            with self._get_job_lock(job_id):
                self._finish_execution(job_id)
    # with job lock
    def _finish_execution(self, job_id):
        super()._finish_execution(job_id)
        self._job_directory(job_id).remove_metadata(JOB_FILE_PID)
    # with job lock
    def _get_status(self, job_id):
        return super()._get_status(job_id)
    # with job lock
    def _was_cancelled(self, job_id):
        return super()._was_cancelled(job_id)
    # with job lock
    def _record_pid(self, job_id, pid):
        self._job_directory(job_id).store_metadata(JOB_FILE_PID, str(pid))
    # with job lock
    def _get_pid_for_killing_or_cancel(self, job_id):
        job_status = self._get_status(job_id)
        if job_status not in [status.RUNNING, status.QUEUED]:
            return
        pid = self.__get_pid(job_id)
        self._record_cancel(job_id)
        if pid is None:
            self._job_directory(job_id).remove_metadata(JOB_FILE_SUBMITTED)
        return pid
    def _run(self, job_id, command_line, background=True):
        with self._get_job_lock(job_id):
            if self._was_cancelled(job_id):
                return
        proc, stdout, stderr = self._proc_for_job_id(job_id, command_line)
        with self._get_job_lock(job_id):
            self._record_pid(job_id, proc.pid)
        self._start_monitor(job_id, proc, stdout, stderr, background=background)
    def _proc_for_job_id(self, job_id, command_line):
        job_directory = self.job_directory(job_id)
        working_directory = job_directory.working_directory()
        stdout = self._open_standard_output(job_id)
        stderr = self._open_standard_error(job_id)
        proc = execute(command_line=command_line,
                       working_directory=working_directory,
                       stdout=stdout,
                       stderr=stderr)
        return proc, stdout, stderr
    def launch(self, job_id, command_line, submit_params={}, dependencies_description=None, env=[], setup_params=None):
        command_line = self._prepare_run(job_id, command_line, dependencies_description=dependencies_description, env=env, setup_params=setup_params)
        self._run(job_id, command_line)
class CoexecutionManager(BaseUnqueuedManager):
    """Manager that managers one job in a pod-like environment.
    Assume some process in another container will execute the command.
    """
    manager_type = "coexecution"
    def __init__(self, name, app, **kwds):
        super().__init__(name, app, **kwds)
    def get_status(self, job_id):
        return self._get_status(job_id)
    def kill(self, job_id):
        log.info("Attempting to kill job with job_id %s - unimplemented in CoexecutionManager..." % job_id)
    def _monitor_execution(self, job_id):
        return_code_path = self._return_code_path(job_id)
        # Write dummy JOB_FILE_PID so get_status thinks this job is running.
        self._job_directory(job_id).store_metadata(JOB_FILE_PID, "1")
        try:
            while not os.path.exists(return_code_path):
                time.sleep(0.1)
                print("monitoring for %s" % return_code_path)
            print("found return code path...")
            self._job_directory(job_id).remove_metadata(JOB_FILE_PID)
            time.sleep(1)
        finally:
            self._finish_execution(job_id)
    def launch(self, job_id, command_line, submit_params={}, dependencies_description=None, env=[], setup_params=None):
        command_line = self._prepare_run(job_id, command_line, dependencies_description=dependencies_description, env=env, setup_params=setup_params)
        job_directory = self.job_directory(job_id)
        working_directory = job_directory.working_directory()
        command_line += " > '{}' 2> '{}'".format(
            self._stdout_path(job_id),
            self._stderr_path(job_id),
        )
        command_line = "cd '{}'; sh {}".format(working_directory, command_line)
        self._write_command_line(job_id, command_line)
        self._start_monitor(job_id)
def execute(command_line, working_directory, stdout, stderr):
    preexec_fn = None
    if platform.system() != 'Windows':
        preexec_fn = os.setpgrp
    proc = Popen(
        args=command_line,
        shell=True,
        cwd=working_directory,
        stdout=stdout,
        stderr=stderr,
        preexec_fn=preexec_fn,
        env=new_clean_env(),
    )
    return proc
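# Hedged sketch (illustrative only, not part of the manager API): how execute()
# composes with the wait/close steps that _monitor_execution performs above.
# The scratch_dir argument and file names are assumptions made for the example.
def _example_execute_usage(scratch_dir):
    stdout = open(os.path.join(scratch_dir, "stdout"), "w")
    stderr = open(os.path.join(scratch_dir, "stderr"), "w")
    proc = execute("echo hello", scratch_dir, stdout, stderr)
    proc.wait()
    stdout.close()
    stderr.close()
    return proc.returncode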
__all__ = ['Manager']
 | |
| 
	import numpy as np
import pytest
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import rand_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import pair_confusion_matrix
from sklearn.metrics.cluster import entropy
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import fowlkes_mallows_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster._supervised import _generalized_average
from sklearn.metrics.cluster._supervised import check_clusterings
from sklearn.utils import assert_all_finite
from sklearn.utils._testing import (
    assert_almost_equal, ignore_warnings)
from numpy.testing import (
    assert_array_equal, assert_array_almost_equal, assert_allclose)
score_funcs = [
    adjusted_rand_score,
    rand_score,
    homogeneity_score,
    completeness_score,
    v_measure_score,
    adjusted_mutual_info_score,
    normalized_mutual_info_score,
]
@ignore_warnings(category=FutureWarning)
def test_error_messages_on_wrong_input():
    for score_func in score_funcs:
        expected = (r'Found input variables with inconsistent numbers '
                    r'of samples: \[2, 3\]')
        with pytest.raises(ValueError, match=expected):
            score_func([0, 1], [1, 1, 1])
        expected = r"labels_true must be 1D: shape is \(2"
        with pytest.raises(ValueError, match=expected):
            score_func([[0, 1], [1, 0]], [1, 1, 1])
        expected = r"labels_pred must be 1D: shape is \(2"
        with pytest.raises(ValueError, match=expected):
            score_func([0, 1, 0], [[1, 1], [0, 0]])
def test_generalized_average():
    a, b = 1, 2
    methods = ["min", "geometric", "arithmetic", "max"]
    means = [_generalized_average(a, b, method) for method in methods]
    assert means[0] <= means[1] <= means[2] <= means[3]
    c, d = 12, 12
    means = [_generalized_average(c, d, method) for method in methods]
    assert means[0] == means[1] == means[2] == means[3]
@ignore_warnings(category=FutureWarning)
def test_perfect_matches():
    for score_func in score_funcs:
        assert score_func([], []) == pytest.approx(1.0)
        assert score_func([0], [1]) == pytest.approx(1.0)
        assert score_func([0, 0, 0], [0, 0, 0]) == pytest.approx(1.0)
        assert score_func([0, 1, 0], [42, 7, 42]) == pytest.approx(1.0)
        assert score_func([0., 1., 0.], [42., 7., 42.]) == pytest.approx(1.0)
        assert score_func([0., 1., 2.], [42., 7., 2.]) == pytest.approx(1.0)
        assert score_func([0, 1, 2], [42, 7, 2]) == pytest.approx(1.0)
    score_funcs_with_changing_means = [
        normalized_mutual_info_score,
        adjusted_mutual_info_score,
    ]
    means = {"min", "geometric", "arithmetic", "max"}
    for score_func in score_funcs_with_changing_means:
        for mean in means:
            assert score_func([], [], mean) == pytest.approx(1.0)
            assert score_func([0], [1], mean) == pytest.approx(1.0)
            assert score_func([0, 0, 0], [0, 0, 0], mean) == pytest.approx(1.0)
            assert score_func(
                [0, 1, 0], [42, 7, 42], mean) == pytest.approx(1.0)
            assert score_func(
                [0., 1., 0.], [42., 7., 42.], mean) == pytest.approx(1.0)
            assert score_func(
                [0., 1., 2.], [42., 7., 2.], mean) == pytest.approx(1.0)
            assert score_func(
                [0, 1, 2], [42, 7, 2], mean) == pytest.approx(1.0)
def test_homogeneous_but_not_complete_labeling():
    # homogeneous but not complete clustering
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 0, 0, 1, 2, 2])
    assert_almost_equal(h, 1.00, 2)
    assert_almost_equal(c, 0.69, 2)
    assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
    # complete but not homogeneous clustering
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 1, 1, 2, 2],
        [0, 0, 1, 1, 1, 1])
    assert_almost_equal(h, 0.58, 2)
    assert_almost_equal(c, 1.00, 2)
    assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
    # neither complete nor homogeneous but not so bad either
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
def test_beta_parameter():
    # test for when beta passed to
    # homogeneity_completeness_v_measure
    # and v_measure_score
    beta_test = 0.2
    h_test = 0.67
    c_test = 0.42
    v_test = ((1 + beta_test) * h_test * c_test
              / (beta_test * h_test + c_test))
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 1, 0, 1, 2, 2],
        beta=beta_test)
    assert_almost_equal(h, h_test, 2)
    assert_almost_equal(c, c_test, 2)
    assert_almost_equal(v, v_test, 2)
    v = v_measure_score(
        [0, 0, 0, 1, 1, 1],
        [0, 1, 0, 1, 2, 2],
        beta=beta_test)
    assert_almost_equal(v, v_test, 2)
def test_non_consecutive_labels():
    # regression tests for labels with gaps
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 2, 2, 2],
        [0, 1, 0, 1, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
    h, c, v = homogeneity_completeness_v_measure(
        [0, 0, 0, 1, 1, 1],
        [0, 4, 0, 4, 2, 2])
    assert_almost_equal(h, 0.67, 2)
    assert_almost_equal(c, 0.42, 2)
    assert_almost_equal(v, 0.52, 2)
    ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(ari_1, 0.24, 2)
    assert_almost_equal(ari_2, 0.24, 2)
    ri_1 = rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
    ri_2 = rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
    assert_almost_equal(ri_1, 0.66, 2)
    assert_almost_equal(ri_2, 0.66, 2)
@ignore_warnings(category=FutureWarning)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
                             seed=42):
    # Compute score for random uniform cluster labelings
    random_labels = np.random.RandomState(seed).randint
    scores = np.zeros((len(k_range), n_runs))
    for i, k in enumerate(k_range):
        for j in range(n_runs):
            labels_a = random_labels(low=0, high=k, size=n_samples)
            labels_b = random_labels(low=0, high=k, size=n_samples)
            scores[i, j] = score_func(labels_a, labels_b)
    return scores
@ignore_warnings(category=FutureWarning)
def test_adjustment_for_chance():
    # Check that adjusted scores are almost zero on random labels
    n_clusters_range = [2, 10, 50, 90]
    n_samples = 100
    n_runs = 10
    scores = uniform_labelings_scores(
        adjusted_rand_score, n_samples, n_clusters_range, n_runs)
    max_abs_scores = np.abs(scores).max(axis=1)
    assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
    # Compute the Adjusted Mutual Information and test against known values
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    # Mutual information
    mi = mutual_info_score(labels_a, labels_b)
    assert_almost_equal(mi, 0.41022, 5)
    # with provided sparse contingency
    C = contingency_matrix(labels_a, labels_b, sparse=True)
    mi = mutual_info_score(labels_a, labels_b, contingency=C)
    assert_almost_equal(mi, 0.41022, 5)
    # with provided dense contingency
    C = contingency_matrix(labels_a, labels_b)
    mi = mutual_info_score(labels_a, labels_b, contingency=C)
    assert_almost_equal(mi, 0.41022, 5)
    # Expected mutual information
    n_samples = C.sum()
    emi = expected_mutual_information(C, n_samples)
    assert_almost_equal(emi, 0.15042, 5)
    # Adjusted mutual information
    ami = adjusted_mutual_info_score(labels_a, labels_b)
    assert_almost_equal(ami, 0.27821, 5)
    ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
    assert ami == pytest.approx(1.0)
    # Test with a very large array
    a110 = np.array([list(labels_a) * 110]).flatten()
    b110 = np.array([list(labels_b) * 110]).flatten()
    ami = adjusted_mutual_info_score(a110, b110)
    assert_almost_equal(ami, 0.38, 2)
def test_expected_mutual_info_overflow():
    # Test for regression where contingency cell exceeds 2**16
    # leading to overflow in np.outer, resulting in EMI > 1
    assert expected_mutual_information(np.array([[70000]]), 70000) <= 1
def test_int_overflow_mutual_info_fowlkes_mallows_score():
    # Test overflow in mutual_info_classif and fowlkes_mallows_score
    x = np.array([1] * (52632 + 2529) + [2] * (14660 + 793) + [3] * (3271 +
                 204) + [4] * (814 + 39) + [5] * (316 + 20))
    y = np.array([0] * 52632 + [1] * 2529 + [0] * 14660 + [1] * 793 +
                 [0] * 3271 + [1] * 204 + [0] * 814 + [1] * 39 + [0] * 316 +
                 [1] * 20)
    assert_all_finite(mutual_info_score(x, y))
    assert_all_finite(fowlkes_mallows_score(x, y))
def test_entropy():
    ent = entropy([0, 0, 42.])
    assert_almost_equal(ent, 0.6365141, 5)
    assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    C = contingency_matrix(labels_a, labels_b)
    C2 = np.histogram2d(labels_a, labels_b,
                        bins=(np.arange(1, 5),
                              np.arange(1, 5)))[0]
    assert_array_almost_equal(C, C2)
    C = contingency_matrix(labels_a, labels_b, eps=.1)
    assert_array_almost_equal(C, C2 + .1)
def test_contingency_matrix_sparse():
    labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
    labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
    C = contingency_matrix(labels_a, labels_b)
    C_sparse = contingency_matrix(labels_a, labels_b, sparse=True).toarray()
    assert_array_almost_equal(C, C_sparse)
    with pytest.raises(ValueError, match="Cannot set 'eps' when sparse=True"):
        contingency_matrix(labels_a, labels_b, eps=1e-10, sparse=True)
@ignore_warnings(category=FutureWarning)
def test_exactly_zero_info_score():
    # Check numerical stability when information is exactly zero
    for i in np.logspace(1, 4, 4).astype(int):
        labels_a, labels_b = (np.ones(i, dtype=int),
                              np.arange(i, dtype=int))
        assert normalized_mutual_info_score(
            labels_a, labels_b) == pytest.approx(0.0)
        assert v_measure_score(
            labels_a, labels_b) == pytest.approx(0.0)
        assert adjusted_mutual_info_score(
            labels_a, labels_b) == pytest.approx(0.0)
        assert normalized_mutual_info_score(
            labels_a, labels_b) == pytest.approx(0.0)
        for method in ["min", "geometric", "arithmetic", "max"]:
            assert adjusted_mutual_info_score(
                labels_a, labels_b, method) == pytest.approx(0.0)
            assert normalized_mutual_info_score(
                labels_a, labels_b, method) == pytest.approx(0.0)
def test_v_measure_and_mutual_information(seed=36):
    # Check relation between v_measure, entropy and mutual information
    for i in np.logspace(1, 4, 4).astype(int):
        random_state = np.random.RandomState(seed)
        labels_a, labels_b = (random_state.randint(0, 10, i),
                              random_state.randint(0, 10, i))
        assert_almost_equal(v_measure_score(labels_a, labels_b),
                            2.0 * mutual_info_score(labels_a, labels_b) /
                            (entropy(labels_a) + entropy(labels_b)), 0)
        avg = 'arithmetic'
        assert_almost_equal(v_measure_score(labels_a, labels_b),
                            normalized_mutual_info_score(labels_a, labels_b,
                                                         average_method=avg)
                            )
def test_fowlkes_mallows_score():
    # General case
    score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1],
                                  [0, 0, 1, 1, 2, 2])
    assert_almost_equal(score, 4. / np.sqrt(12. * 6.))
    # Perfect match but where the label names changed
    perfect_score = fowlkes_mallows_score([0, 0, 0, 1, 1, 1],
                                          [1, 1, 1, 0, 0, 0])
    assert_almost_equal(perfect_score, 1.)
    # Worst case
    worst_score = fowlkes_mallows_score([0, 0, 0, 0, 0, 0],
                                        [0, 1, 2, 3, 4, 5])
    assert_almost_equal(worst_score, 0.)
def test_fowlkes_mallows_score_properties():
    # handcrafted example
    labels_a = np.array([0, 0, 0, 1, 1, 2])
    labels_b = np.array([1, 1, 2, 2, 0, 0])
    expected = 1. / np.sqrt((1. + 3.) * (1. + 2.))
    # FMI = TP / sqrt((TP + FP) * (TP + FN))
    score_original = fowlkes_mallows_score(labels_a, labels_b)
    assert_almost_equal(score_original, expected)
    # symmetric property
    score_symmetric = fowlkes_mallows_score(labels_b, labels_a)
    assert_almost_equal(score_symmetric, expected)
    # permutation property
    score_permuted = fowlkes_mallows_score((labels_a + 1) % 3, labels_b)
    assert_almost_equal(score_permuted, expected)
    # symmetric and permutation(both together)
    score_both = fowlkes_mallows_score(labels_b, (labels_a + 2) % 3)
    assert_almost_equal(score_both, expected)
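# Hedged cross-check (illustrative addition, not part of the original suite):
# FMI can also be read off the pair confusion matrix, whose entries count
# ordered pairs, so entry [1, 1] is 2*TP, [1, 0] is 2*FN and [0, 1] is 2*FP.
# For the handcrafted labels above this reproduces `expected` exactly.
def _fmi_from_pair_confusion(labels_a, labels_b):
    (_, fp2), (fn2, tp2) = pair_confusion_matrix(labels_a, labels_b)
    tp, fp, fn = tp2 / 2, fp2 / 2, fn2 / 2
    return tp / np.sqrt((tp + fp) * (tp + fn))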
@pytest.mark.parametrize('labels_true, labels_pred', [
    (['a'] * 6, [1, 1, 0, 0, 1, 1]),
    ([1] * 6, [1, 1, 0, 0, 1, 1]),
    ([1, 1, 0, 0, 1, 1], ['a'] * 6),
    ([1, 1, 0, 0, 1, 1], [1] * 6),
])
def test_mutual_info_score_positive_constant_label(labels_true, labels_pred):
    # non-regression test for #16355
    assert mutual_info_score(labels_true, labels_pred) >= 0
def test_check_clustering_error():
    # Test warning message for continuous values
    rng = np.random.RandomState(42)
    noise = rng.rand(500)
    wavelength = np.linspace(0.01, 1, 500) * 1e-6
    msg = 'Clustering metrics expects discrete values but received ' \
          'continuous values for label, and continuous values for ' \
          'target'
    with pytest.warns(UserWarning, match=msg):
        check_clusterings(wavelength, noise)
def test_pair_confusion_matrix_fully_dispersed():
    # edge case: every element is its own cluster
    N = 100
    clustering1 = list(range(N))
    clustering2 = clustering1
    expected = np.array([[N * (N - 1), 0], [0, 0]])
    assert_array_equal(
        pair_confusion_matrix(clustering1, clustering2), expected
    )
def test_pair_confusion_matrix_single_cluster():
    # edge case: only one cluster
    N = 100
    clustering1 = np.zeros((N,))
    clustering2 = clustering1
    expected = np.array([[0, 0], [0, N * (N - 1)]])
    assert_array_equal(
        pair_confusion_matrix(clustering1, clustering2), expected
    )
def test_pair_confusion_matrix():
    # regular case: different non-trivial clusterings
    n = 10
    N = n ** 2
    clustering1 = np.hstack([[i + 1] * n for i in range(n)])
    clustering2 = np.hstack([[i + 1] * (n + 1) for i in range(n)])[:N]
    # basic quadratic implementation
    expected = np.zeros(shape=(2, 2), dtype=np.int64)
    for i in range(len(clustering1)):
        for j in range(len(clustering2)):
            if i != j:
                same_cluster_1 = int(clustering1[i] == clustering1[j])
                same_cluster_2 = int(clustering2[i] == clustering2[j])
                expected[same_cluster_1, same_cluster_2] += 1
    assert_array_equal(
        pair_confusion_matrix(clustering1, clustering2), expected
    )
@pytest.mark.parametrize(
    "clustering1, clustering2",
    [(list(range(100)), list(range(100))),
     (np.zeros((100,)), np.zeros((100,)))]
)
def test_rand_score_edge_cases(clustering1, clustering2):
    # edge case 1: every element is its own cluster
    # edge case 2: only one cluster
    assert_allclose(rand_score(clustering1, clustering2), 1.)
def test_rand_score():
    # regular case: different non-trivial clusterings
    clustering1 = [0, 0, 0, 1, 1, 1]
    clustering2 = [0, 1, 0, 1, 2, 2]
    # pair confusion matrix
    D11 = 2 * 2  # ordered pairs (1, 3), (5, 6)
    D10 = 2 * 4  # ordered pairs (1, 2), (2, 3), (4, 5), (4, 6)
    D01 = 2 * 1  # ordered pair (2, 4)
    D00 = 5 * 6 - D11 - D01 - D10  # the remaining pairs
    # rand score
    expected_numerator = D00 + D11
    expected_denominator = D00 + D01 + D10 + D11
    expected = expected_numerator / expected_denominator
    assert_allclose(rand_score(clustering1, clustering2), expected)
 | |
| 
	from datetime import datetime
from django.core.urlresolvers import reverse
# Google Diff Match Patch library
# http://code.google.com/p/google-diff-match-patch
from diff_match_patch import diff_match_patch
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db.models.query import QuerySet
from tagging.fields import TagField
from tagging.models import Tag
from wiki.utils import get_ct
try:
    from notification import models as notification
    from django.db.models import signals
except ImportError:
    notification = None
# We don't need to create a new one every time
dmp = diff_match_patch()
def diff(txt1, txt2):
    """Create a 'diff' from txt1 to txt2."""
    patch = dmp.patch_make(txt1, txt2)
    return dmp.patch_toText(patch)
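# Hedged illustration of the patch round trip used by ChangeSet below; nothing
# in the models calls this helper, it only documents the diff_match_patch flow.
def _diff_roundtrip_example(old_text, new_text):
    patch_text = diff(old_text, new_text)        # serialized patch, like content_diff
    patches = dmp.patch_fromText(patch_text)     # parse the stored text back into patches
    restored, results = dmp.patch_apply(patches, old_text)
    return restored, all(results)                # restored == new_text when every hunk applies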
try:
    markup_choices = settings.WIKI_MARKUP_CHOICES
except AttributeError:
    markup_choices = (
        ('creole', _(u'Creole')),
        ('restructuredtext', _(u'reStructuredText')),
        ('textile', _(u'Textile')),
        ('markdown', _(u'Markdown')),
    )
# Avoid boilerplate defining our own querysets
class QuerySetManager(models.Manager):
    def get_query_set(self):
        return self.model.QuerySet(self.model)
class NonRemovedArticleManager(QuerySetManager):
    def get_query_set(self):
        q = super(NonRemovedArticleManager, self).get_query_set()
        return q.filter(removed=False)
class Article(models.Model):
    """ A wiki page.
    """
    title = models.CharField(_(u"Title"), max_length=50)
    content = models.TextField(_(u"Content"))
    summary = models.CharField(_(u"Summary"), max_length=150,
                               null=True, blank=True)
    markup = models.CharField(_(u"Content Markup"), max_length=3,
                              choices=markup_choices,
                              null=True, blank=True)
    creator = models.ForeignKey(User, verbose_name=_('Article Creator'),
                                null=True)
    creator_ip = models.IPAddressField(_("IP Address of the Article Creator"),
                                       blank=True, null=True)
    created_at = models.DateTimeField(default=datetime.now)
    last_update = models.DateTimeField(blank=True, null=True)
    removed = models.BooleanField(_("Is removed?"), default=False)
    content_type = models.ForeignKey(ContentType, null=True)
    object_id = models.PositiveIntegerField(null=True)
    group = generic.GenericForeignKey('content_type', 'object_id')
    tags = TagField()
    objects = QuerySetManager()
    non_removed_objects = NonRemovedArticleManager()
    class QuerySet(QuerySet):
        def get_by(self, title, group=None):
            if group is None:
                return self.get(title=title)
            return group.get_related_objects(self.filter(title=title)).get()
    class Meta:
        verbose_name = _(u'Article')
        verbose_name_plural = _(u'Articles')
    def get_absolute_url(self):
        if self.group is None:
            return reverse('wiki_article', args=(self.title,))
        return self.group.get_absolute_url() + 'wiki/' + self.title
    def save(self, force_insert=False, force_update=False):
        self.last_update = datetime.now()
        super(Article, self).save(force_insert, force_update)
    def remove(self):
        """ Mark the Article as 'removed'. If the article is
        already removed, delete it.
        Returns True if the article was deleted, False when just marked
        as removed.
        """
        if self.removed:
            self.delete()
            return True
        else:
            self.removed = True
            self.save()
            return False
    def latest_changeset(self):
        try:
            return self.changeset_set.filter(
                reverted=False).order_by('-revision')[0]
        except IndexError:
            return ChangeSet.objects.none()
    def new_revision(self, old_content, old_title, old_markup,
                     comment, editor_ip, editor):
        '''Create a new ChangeSet with the old content.'''
        content_diff = diff(self.content, old_content)
        cs = ChangeSet.objects.create(
            article=self,
            comment=comment,
            editor_ip=editor_ip,
            editor=editor,
            old_title=old_title,
            old_markup=old_markup,
            content_diff=content_diff)
        if None not in (notification, self.creator):
            if editor is None:
                editor = editor_ip
            notification.send([self.creator], "wiki_article_edited",
                              {'article': self, 'user': editor})
        return cs
    def revert_to(self, revision, editor_ip, editor=None):
        """ Revert the article to a previuos state, by revision number.
        """
        changeset = self.changeset_set.get(revision=revision)
        changeset.reapply(editor_ip, editor)
    def __unicode__(self):
        return self.title
class NonRevertedChangeSetManager(QuerySetManager):
    def get_query_set(self):
        q = super(NonRevertedChangeSetManager, self).get_query_set()
        return q.filter(reverted=False)
class ChangeSet(models.Model):
    """A report of an older version of some Article."""
    article = models.ForeignKey(Article, verbose_name=_(u"Article"))
    # Editor identification -- logged or anonymous
    editor = models.ForeignKey(User, verbose_name=_(u'Editor'),
                               null=True)
    editor_ip = models.IPAddressField(_(u"IP Address of the Editor"))
    # Revision number, starting from 1
    revision = models.IntegerField(_(u"Revision Number"))
    # How to recreate this version
    old_title = models.CharField(_(u"Old Title"), max_length=50, blank=True)
    old_markup = models.CharField(_(u"Article Content Markup"), max_length=3,
                                  choices=markup_choices,
                                  null=True, blank=True)
    content_diff = models.TextField(_(u"Content Patch"), blank=True)
    comment = models.CharField(_(u"Editor comment"), max_length=50, blank=True)
    modified = models.DateTimeField(_(u"Modified at"), default=datetime.now)
    reverted = models.BooleanField(_(u"Reverted Revision"), default=False)
    objects = QuerySetManager()
    non_reverted_objects = NonRevertedChangeSetManager()
    class QuerySet(QuerySet):
        def all_later(self, revision):
            """ Return all changes later to the given revision.
            Util when we want to revert to the given revision.
            """
            return self.filter(revision__gt=int(revision))
    class Meta:
        verbose_name = _(u'Change set')
        verbose_name_plural = _(u'Change sets')
        get_latest_by = 'modified'
        ordering = ('-revision',)
    def __unicode__(self):
        return u'#%s' % self.revision
    @models.permalink
    def get_absolute_url(self):
        if self.article.group is None:
            return ('wiki_changeset', (),
                    {'title': self.article.title,
                     'revision': self.revision})
        return ('wiki_changeset', (),
                {'group_slug': self.article.group.slug,
                 'title': self.article.title,
                 'revision': self.revision})
    def is_anonymous_change(self):
        return self.editor is None
    def reapply(self, editor_ip, editor):
        """ Return the Article to this revision.
        """
        # XXX Would be better to exclude reverted revisions
        #     and revisions previous/next to reverted ones
        next_changes = self.article.changeset_set.filter(
            revision__gt=self.revision).order_by('-revision')
        article = self.article
        content = None
        for changeset in next_changes:
            if content is None:
                content = article.content
            patch = dmp.patch_fromText(changeset.content_diff)
            content = dmp.patch_apply(patch, content)[0]
            changeset.reverted = True
            changeset.save()
        old_content = article.content
        old_title = article.title
        old_markup = article.markup
        article.content = content
        article.title = changeset.old_title
        article.markup = changeset.old_markup
        article.save()
        article.new_revision(
            old_content=old_content, old_title=old_title,
            old_markup=old_markup,
            comment="Reverted to revision #%s" % self.revision,
            editor_ip=editor_ip, editor=editor)
        self.save()
        if None not in (notification, self.editor):
            notification.send([self.editor], "wiki_revision_reverted",
                              {'revision': self, 'article': self.article})
    def save(self, force_insert=False, force_update=False):
        """ Saves the article with a new revision.
        """
        if self.id is None:
            try:
                self.revision = ChangeSet.objects.filter(
                    article=self.article).latest().revision + 1
            except self.DoesNotExist:
                self.revision = 1
        super(ChangeSet, self).save(force_insert, force_update)
    def display_diff(self):
        ''' Returns an HTML representation of the diff.
        '''
        # well, it *will* be the old content
        old_content = self.article.content
        # newer non-reverted revisions of this article, starting from this one
        newer_changesets = ChangeSet.non_reverted_objects.filter(
            article=self.article,
            revision__gte=self.revision)
        # apply all patches to get the content of this revision
        for i, changeset in enumerate(newer_changesets):
            patches = dmp.patch_fromText(changeset.content_diff)
            if len(newer_changesets) == i+1:
                # we need to compare with the next revision after the change
                next_rev_content = old_content
            old_content = dmp.patch_apply(patches, old_content)[0]
        diffs = dmp.diff_main(old_content, next_rev_content)
        return dmp.diff_prettyHtml(diffs)
if notification is not None:
    signals.post_save.connect(notification.handle_observations, sender=Article)
 | |
| 
	#!/usr/bin/env python
# Copyright 2012 - 2013 Red Hat, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.
import sys
import rtslib
from cinder import i18n
from cinder.i18n import _
i18n.enable_lazy()
class RtstoolError(Exception):
    pass
class RtstoolImportError(RtstoolError):
    pass
def create(backing_device, name, userid, password, initiator_iqns=None):
    try:
        rtsroot = rtslib.root.RTSRoot()
    except rtslib.utils.RTSLibError:
        print(_('Ensure that configfs is mounted at /sys/kernel/config.'))
        raise
    # Look to see if BlockStorageObject already exists
    for x in rtsroot.storage_objects:
        if x.name == name:
            # Already exists, use this one
            return
    so_new = rtslib.BlockStorageObject(name=name,
                                       dev=backing_device)
    target_new = rtslib.Target(rtslib.FabricModule('iscsi'), name, 'create')
    tpg_new = rtslib.TPG(target_new, mode='create')
    tpg_new.set_attribute('authentication', '1')
    lun_new = rtslib.LUN(tpg_new, storage_object=so_new)
    if initiator_iqns:
        initiator_iqns = initiator_iqns.strip(' ')
        for i in initiator_iqns.split(','):
            acl_new = rtslib.NodeACL(tpg_new, i, mode='create')
            acl_new.chap_userid = userid
            acl_new.chap_password = password
            rtslib.MappedLUN(acl_new, lun_new.lun, lun_new.lun)
    tpg_new.enable = 1
    try:
        rtslib.NetworkPortal(tpg_new, '0.0.0.0', 3260, mode='any')
    except rtslib.utils.RTSLibError:
        print(_('Error creating NetworkPortal: ensure port 3260 '
                'is not in use by another service.'))
        raise
    try:
        rtslib.NetworkPortal(tpg_new, '::0', 3260, mode='any')
    except rtslib.utils.RTSLibError:
        # TODO(emh): Binding to IPv6 fails sometimes -- let pass for now.
        pass
def _lookup_target(target_iqn, initiator_iqn):
    try:
        rtsroot = rtslib.root.RTSRoot()
    except rtslib.utils.RTSLibError:
        print(_('Ensure that configfs is mounted at /sys/kernel/config.'))
        raise
    # Look for the target
    for t in rtsroot.targets:
        if t.wwn == target_iqn:
            return t
    raise RtstoolError(_('Could not find target %s') % target_iqn)
def add_initiator(target_iqn, initiator_iqn, userid, password):
    target = _lookup_target(target_iqn, initiator_iqn)
    tpg = target.tpgs.next()  # get the first one
    for acl in tpg.node_acls:
        # See if this ACL configuration already exists
        if acl.node_wwn == initiator_iqn:
            # No further action required
            return
    acl_new = rtslib.NodeACL(tpg, initiator_iqn, mode='create')
    acl_new.chap_userid = userid
    acl_new.chap_password = password
    rtslib.MappedLUN(acl_new, 0, tpg_lun=0)
def delete_initiator(target_iqn, initiator_iqn):
    target = _lookup_target(target_iqn, initiator_iqn)
    tpg = target.tpgs.next()  # get the first one
    for acl in tpg.node_acls:
        if acl.node_wwn == initiator_iqn:
            acl.delete()
            return
    raise RtstoolError(_('Could not find ACL %(acl)s in target %(target)s')
                       % {'target': target_iqn, 'acl': initiator_iqn})
def get_targets():
    rtsroot = rtslib.root.RTSRoot()
    for x in rtsroot.targets:
        print(x.wwn)
def delete(iqn):
    rtsroot = rtslib.root.RTSRoot()
    for x in rtsroot.targets:
        if x.wwn == iqn:
            x.delete()
            break
    for x in rtsroot.storage_objects:
        if x.name == iqn:
            x.delete()
            break
def verify_rtslib():
    for member in ['BlockStorageObject', 'FabricModule', 'LUN',
                   'MappedLUN', 'NetworkPortal', 'NodeACL', 'root',
                   'Target', 'TPG']:
        if not hasattr(rtslib, member):
            raise RtstoolImportError(_("rtslib is missing member %s: "
                                       "You may need a newer python-rtslib.") %
                                     member)
def usage():
    print("Usage:")
    print(sys.argv[0] +
          " create [device] [name] [userid] [password]" +
          " <initiator_iqn,iqn2,iqn3,...>")
    print(sys.argv[0] +
          " add-initiator [target_iqn] [userid] [password] [initiator_iqn]")
    print(sys.argv[0] +
          " delete-initiator [target_iqn] [initiator_iqn]")
    print(sys.argv[0] + " get-targets")
    print(sys.argv[0] + " delete [iqn]")
    print(sys.argv[0] + " verify")
    sys.exit(1)
def main(argv=None):
    if argv is None:
        argv = sys.argv
    if len(argv) < 2:
        usage()
    if argv[1] == 'create':
        if len(argv) < 6:
            usage()
        if len(argv) > 7:
            usage()
        backing_device = argv[2]
        name = argv[3]
        userid = argv[4]
        password = argv[5]
        initiator_iqns = None
        if len(argv) > 6:
            initiator_iqns = argv[6]
        create(backing_device, name, userid, password, initiator_iqns)
    elif argv[1] == 'add-initiator':
        if len(argv) < 6:
            usage()
        target_iqn = argv[2]
        userid = argv[3]
        password = argv[4]
        initiator_iqn = argv[5]
        add_initiator(target_iqn, initiator_iqn, userid, password)
    elif argv[1] == 'delete-initiator':
        if len(argv) < 4:
            usage()
        target_iqn = argv[2]
        initiator_iqn = argv[3]
        delete_initiator(target_iqn, initiator_iqn)
    elif argv[1] == 'get-targets':
        get_targets()
    elif argv[1] == 'delete':
        if len(argv) < 3:
            usage()
        iqn = argv[2]
        delete(iqn)
    elif argv[1] == 'verify':
        # This is used to verify that this script can be called by cinder,
        # and that rtslib is new enough to work.
        verify_rtslib()
        return 0
    else:
        usage()
    return 0
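# Hedged entry point sketch: this module is normally invoked as a console
# script (e.g. "cinder-rtstool create <device> <name> <userid> <password>");
# the guard below is an illustrative assumption, not necessarily how the real
# package wires up its entry point.
if __name__ == '__main__':
    sys.exit(main())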
 | |
| 
	# coding: utf-8
"""
    Wavefront REST API Documentation
    <p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p>  # noqa: E501
    OpenAPI spec version: v2
    Contact: [email protected]
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re  # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class IntegrationDashboard(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'dashboard_min_obj': 'DashboardMin',
        'dashboard_obj': 'Dashboard',
        'description': 'str',
        'name': 'str',
        'url': 'str'
    }
    attribute_map = {
        'dashboard_min_obj': 'dashboardMinObj',
        'dashboard_obj': 'dashboardObj',
        'description': 'description',
        'name': 'name',
        'url': 'url'
    }
    def __init__(self, dashboard_min_obj=None, dashboard_obj=None, description=None, name=None, url=None, _configuration=None):  # noqa: E501
        """IntegrationDashboard - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        self._dashboard_min_obj = None
        self._dashboard_obj = None
        self._description = None
        self._name = None
        self._url = None
        self.discriminator = None
        if dashboard_min_obj is not None:
            self.dashboard_min_obj = dashboard_min_obj
        if dashboard_obj is not None:
            self.dashboard_obj = dashboard_obj
        self.description = description
        self.name = name
        self.url = url
    @property
    def dashboard_min_obj(self):
        """Gets the dashboard_min_obj of this IntegrationDashboard.  # noqa: E501
        :return: The dashboard_min_obj of this IntegrationDashboard.  # noqa: E501
        :rtype: DashboardMin
        """
        return self._dashboard_min_obj
    @dashboard_min_obj.setter
    def dashboard_min_obj(self, dashboard_min_obj):
        """Sets the dashboard_min_obj of this IntegrationDashboard.
        :param dashboard_min_obj: The dashboard_min_obj of this IntegrationDashboard.  # noqa: E501
        :type: DashboardMin
        """
        self._dashboard_min_obj = dashboard_min_obj
    @property
    def dashboard_obj(self):
        """Gets the dashboard_obj of this IntegrationDashboard.  # noqa: E501
        :return: The dashboard_obj of this IntegrationDashboard.  # noqa: E501
        :rtype: Dashboard
        """
        return self._dashboard_obj
    @dashboard_obj.setter
    def dashboard_obj(self, dashboard_obj):
        """Sets the dashboard_obj of this IntegrationDashboard.
        :param dashboard_obj: The dashboard_obj of this IntegrationDashboard.  # noqa: E501
        :type: Dashboard
        """
        self._dashboard_obj = dashboard_obj
    @property
    def description(self):
        """Gets the description of this IntegrationDashboard.  # noqa: E501
        Dashboard description  # noqa: E501
        :return: The description of this IntegrationDashboard.  # noqa: E501
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this IntegrationDashboard.
        Dashboard description  # noqa: E501
        :param description: The description of this IntegrationDashboard.  # noqa: E501
        :type: str
        """
        if self._configuration.client_side_validation and description is None:
            raise ValueError("Invalid value for `description`, must not be `None`")  # noqa: E501
        self._description = description
    @property
    def name(self):
        """Gets the name of this IntegrationDashboard.  # noqa: E501
        Dashboard name  # noqa: E501
        :return: The name of this IntegrationDashboard.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this IntegrationDashboard.
        Dashboard name  # noqa: E501
        :param name: The name of this IntegrationDashboard.  # noqa: E501
        :type: str
        """
        if self._configuration.client_side_validation and name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name
    @property
    def url(self):
        """Gets the url of this IntegrationDashboard.  # noqa: E501
        URL path to the JSON definition of this dashboard  # noqa: E501
        :return: The url of this IntegrationDashboard.  # noqa: E501
        :rtype: str
        """
        return self._url
    @url.setter
    def url(self, url):
        """Sets the url of this IntegrationDashboard.
        URL path to the JSON definition of this dashboard  # noqa: E501
        :param url: The url of this IntegrationDashboard.  # noqa: E501
        :type: str
        """
        if self._configuration.client_side_validation and url is None:
            raise ValueError("Invalid value for `url`, must not be `None`")  # noqa: E501
        self._url = url
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(IntegrationDashboard, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, IntegrationDashboard):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, IntegrationDashboard):
            return True
        return self.to_dict() != other.to_dict()
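# Hedged usage sketch (hand-written illustration, not emitted by swagger-codegen):
# build a dashboard model and inspect its dict/string forms. The field values
# below are placeholders.
def _integration_dashboard_example():
    dashboard = IntegrationDashboard(
        description="CPU overview dashboard",
        name="cpu-overview",
        url="integration/cpu-overview.json",
    )
    return dashboard.to_dict(), dashboard.to_str()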
 | |
| 
	# HologramCloud.py - Hologram Python SDK Cloud interface
#
# Author: Hologram <[email protected]>
#
# Copyright 2016 - Hologram (Konekt, Inc.)
#
#
# LICENSE: Distributed under the terms of the MIT License
#
import binascii
import json
import sys
from Hologram.CustomCloud import CustomCloud
from HologramAuth import TOTPAuthentication, SIMOTPAuthentication
from Hologram.Authentication import CSRPSKAuthentication
from Exceptions.HologramError import HologramError
DEFAULT_SEND_MESSAGE_TIMEOUT = 5
HOLOGRAM_HOST_SEND = 'cloudsocket.hologram.io'
HOLOGRAM_PORT_SEND = 9999
HOLOGRAM_HOST_RECEIVE = '0.0.0.0'
HOLOGRAM_PORT_RECEIVE = 4010
MAX_SMS_LENGTH = 160
# Hologram error codes
ERR_OK = 0
ERR_CONNCLOSED = 1 # Connection was closed before a terminating character
                   # but message might be fine
ERR_MSGINVALID = 2 # Couldn't parse the message
ERR_AUTHINVALID = 3 # Auth section of message was invalid
ERR_PAYLOADINVALID = 4 # Payload type was invalid
ERR_PROTINVALID = 5 # Protocol type was invalid
ERR_INTERNAL = 6 # An internal error occurred
ERR_METADATA = 7 # Metadata was formatted incorrectly
ERR_TOPICINVALID = 8 # Topic was formatted incorrectly
ERR_UNKNOWN = -1 # Unknown error
class HologramCloud(CustomCloud):
    _authentication_handlers = {
        'csrpsk' : CSRPSKAuthentication.CSRPSKAuthentication,
        'totp' : TOTPAuthentication.TOTPAuthentication,
        'sim-otp' : SIMOTPAuthentication.SIMOTPAuthentication,
    }
    _errorCodeDescription = {
        ERR_OK: 'Message sent successfully',
        ERR_CONNCLOSED: 'Connection was closed so we couldn\'t read the whole message',
        ERR_MSGINVALID: 'Failed to parse the message',
        ERR_AUTHINVALID: 'Auth section of the message was invalid',
        ERR_PAYLOADINVALID: 'Payload type was invalid',
        ERR_PROTINVALID: 'Protocol type was invalid',
        ERR_INTERNAL: 'Internal error in Hologram Cloud',
        ERR_METADATA: 'Metadata was formatted incorrectly',
        ERR_TOPICINVALID: 'Topic was formatted incorrectly',
        ERR_UNKNOWN: 'Unknown error'
    }
    def __init__(self, credentials, enable_inbound=False, network='',
                 authentication_type='totp'):
        super().__init__(credentials,
                         send_host=HOLOGRAM_HOST_SEND,
                         send_port=HOLOGRAM_PORT_SEND,
                         receive_host=HOLOGRAM_HOST_RECEIVE,
                         receive_port=HOLOGRAM_PORT_RECEIVE,
                         enable_inbound=enable_inbound,
                         network=network)
        self.setAuthenticationType(credentials, authentication_type=authentication_type)
        if self.authenticationType == 'totp':
            self.__populate_totp_credentials()
    # EFFECTS: Authentication Configuration
    def setAuthenticationType(self, credentials, authentication_type='csrpsk'):
        if authentication_type not in HologramCloud._authentication_handlers:
            raise HologramError('Invalid authentication type: %s' % authentication_type)
        self.authenticationType = authentication_type
        self.authentication = HologramCloud._authentication_handlers[self.authenticationType](credentials)
    # EFFECTS: Sends the message to the cloud.
    def sendMessage(self, message, topics=None, timeout=DEFAULT_SEND_MESSAGE_TIMEOUT):
        if not self.is_ready_to_send():
            self.addPayloadToBuffer(message)
            return ''
        # Set the appropriate credentials required for sim otp authentication.
        if self.authenticationType == 'sim-otp':
            self.__populate_sim_otp_credentials()
        modem_type = None
        modem_id = None
        if self.network is not None:
            modem_id = self.network.modem_id
            modem_type = str(self.network.modem)
        output = self.authentication.buildPayloadString(message,
                                                        topics=topics,
                                                        modem_type=modem_type,
                                                        modem_id=modem_id,
                                                        version=self.version)
        result = super().sendMessage(output, timeout)
        return self.__parse_result(result)
    def __parse_result(self, result):
        resultList = None
        if self.authenticationType == 'csrpsk':
            resultList = self.__parse_hologram_json_result(result)
        else:
            resultList = self.__parse_hologram_compact_result(result)
        return resultList[0]
    def __populate_totp_credentials(self):
        try:
            self.authentication.credentials['device_id'] = self.network.iccid
            self.authentication.credentials['private_key'] = self.network.imsi
        except Exception as e:
            self.logger.error('Unable to fetch device id or private key')
    def __populate_sim_otp_credentials(self):
        nonce = self.request_hex_nonce()
        command = self.authentication.generate_sim_otp_command(imsi=self.network.imsi,
                                                               iccid=self.network.iccid,
                                                               nonce=nonce)
        modem_response = self.network.get_sim_otp_response(command)
        self.authentication.generate_sim_otp_token(modem_response)
    def sendSMS(self, destination_number, message):
        self.__enforce_authentication_type_supported_for_sms()
        self.__enforce_valid_destination_number(destination_number)
        self.__enforce_max_sms_length(message)
        output = self.authentication.buildSMSPayloadString(destination_number,
                                                           message)
        self.logger.debug('Destination number: %s', destination_number)
        self.logger.debug('SMS: %s', message)
        result = super().sendMessage(output)
        resultList = self.__parse_hologram_compact_result(result)
        return resultList[0]
    # REQUIRES: Called only when sim otp authentication is required.
    # EFFECTS: Request for a hex nonce.
    def request_hex_nonce(self):
        self.open_send_socket()
        # build nonce request payload string
        nonce_request = self.authentication.buildNonceRequestPayloadString()
        self.logger.debug("Sending nonce request with body of length %d", len(nonce_request))
        self.logger.debug('Send: %s', nonce_request)
        nonce = super().sendMessage(message=nonce_request, timeout=10, close_socket=False)
        self.logger.debug('Nonce request sent.')
        resultbuf_hex = binascii.b2a_hex(nonce)
        if resultbuf_hex is None:
            raise HologramError('Internal nonce error')
        return resultbuf_hex
    def enableSMS(self):
        return self.network.enableSMS()
    def disableSMS(self):
        return self.network.disableSMS()
    def popReceivedSMS(self):
        return self.network.popReceivedSMS()
    # EFFECTS: Parses the hologram send response.
    def __parse_hologram_json_result(self, result):
        try:
            resultList = json.loads(result)
            if isinstance(resultList, bytes):
                resultList[0] = int(chr(resultList[0]))
            else:
                resultList[0] = int(resultList[0])
        except ValueError:
            self.logger.error('Server replied with invalid JSON [%s]', result)
            resultList = [ERR_UNKNOWN]
        return resultList
    def __parse_hologram_compact_result(self, result):
        # Convert the returned response into a formatted list.
        if result is None:
            return [ERR_UNKNOWN]
        resultList = []
        if isinstance(result, bytes):
            for x in result:
                resultList.append(int(chr(x)))
        else:
            for x in result:
                resultList.append(int(x))
        if len(resultList) == 0:
            resultList = [ERR_UNKNOWN]
        return resultList
    def __enforce_max_sms_length(self, message):
        if len(message) > MAX_SMS_LENGTH:
            raise HologramError('SMS cannot be more than %d characters long' % MAX_SMS_LENGTH)
    def __enforce_valid_destination_number(self, destination_number):
        if not destination_number.startswith('+'):
            raise HologramError('SMS destination number must start with a \'+\' sign')
    def __enforce_authentication_type_supported_for_sms(self):
        if self.authenticationType != 'csrpsk':
            raise HologramError('%s does not support SDK SMS features' % self.authenticationType)
    # REQUIRES: A result code (int).
    # EFFECTS: Returns a translated string based on the given hologram result code.
    def getResultString(self, result_code):
        if result_code not in self._errorCodeDescription:
            return 'Unknown response code'
        return self._errorCodeDescription[result_code]
    def resultWasSuccess(self, result_code):
        return result_code in (ERR_OK, ERR_CONNCLOSED)
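# Hedged usage sketch (illustrative only): send a message with CSRPSK
# credentials and translate the numeric result code. The credentials key below
# is an assumption about the expected dict shape; check the SDK docs for yours.
def _hologram_cloud_example():
    credentials = {'devicekey': '0123abcd'}  # assumed csrpsk credential format
    cloud = HologramCloud(credentials, authentication_type='csrpsk')
    result = cloud.sendMessage('Hello, Hologram!', topics=['EXAMPLE-TOPIC'])
    return cloud.getResultString(result), cloud.resultWasSuccess(result)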
 | |
| 
	# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""
A connection to the VMware vCenter platform.
"""
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_vmware import api
from oslo_vmware import exceptions as vexc
from oslo_vmware import pbm
from oslo_vmware import vim
from oslo_vmware import vim_util
from nova.compute import task_states
from nova.compute import vm_states
from nova import exception
from nova import utils
from nova.i18n import _, _LI, _LE, _LW
from nova import objects
from nova.virt import driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import host
from nova.virt.vmwareapi import vim_util as nova_vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops
LOG = logging.getLogger(__name__)
vmwareapi_opts = [
    cfg.StrOpt('host_ip',
               help='Hostname or IP address for connection to VMware '
                    'vCenter host.'),
    cfg.IntOpt('host_port',
               default=443,
               help='Port for connection to VMware vCenter host.'),
    cfg.StrOpt('host_username',
               help='Username for connection to VMware vCenter host.'),
    cfg.StrOpt('host_password',
               help='Password for connection to VMware vCenter host.',
               secret=True),
    cfg.StrOpt('ca_file',
               help='Specify a CA bundle file to use in verifying the '
                    'vCenter server certificate.'),
    cfg.BoolOpt('insecure',
                default=False,
                help='If true, the vCenter server certificate is not '
                     'verified. If false, then the default CA truststore is '
                     'used for verification. This option is ignored if '
                     '"ca_file" is set.'),
    cfg.StrOpt('cluster_name',
               help='Name of a VMware Cluster ComputeResource.'),
    cfg.StrOpt('datastore_regex',
               help='Regex to match the name of a datastore.'),
    cfg.FloatOpt('task_poll_interval',
                 default=0.5,
                 help='The interval used for polling of remote tasks.'),
    cfg.IntOpt('api_retry_count',
               default=10,
               help='The number of times we retry on failures, e.g., '
                    'socket error, etc.'),
    cfg.IntOpt('vnc_port',
               default=5900,
               help='VNC starting port'),
    cfg.IntOpt('vnc_port_total',
               default=10000,
               help='Total number of VNC ports'),
    cfg.BoolOpt('use_linked_clone',
                default=True,
                help='Whether to use linked clone'),
    cfg.StrOpt('wsdl_location',
               help='Optional VIM Service WSDL Location, '
                    'e.g. http://<server>/vimService.wsdl. '
                    'Optional override of the default location for bug '
                    'workarounds.')
    ]
spbm_opts = [
    cfg.BoolOpt('pbm_enabled',
                default=False,
                help='Whether storage policy based management (PBM) is '
                     'enabled.'),
    cfg.StrOpt('pbm_wsdl_location',
               help='PBM service WSDL file location URL, '
                    'e.g. file:///opt/SDK/spbm/wsdl/pbmService.wsdl. '
                    'Not setting this will disable storage policy based '
                    'placement of instances.'),
    cfg.StrOpt('pbm_default_policy',
               help='The PBM default policy. If pbm_wsdl_location is set and '
                    'there is no defined storage policy for the specific '
                    'request then this policy will be used.'),
    ]
CONF = cfg.CONF
CONF.register_opts(vmwareapi_opts, 'vmware')
CONF.register_opts(spbm_opts, 'vmware')
TIME_BETWEEN_API_CALL_RETRIES = 1.0
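# Illustrative nova.conf snippet (hypothetical values) for the options
# registered above; host_ip, host_username and host_password are required by
# VMwareVCDriver.__init__ below, the remaining options are optional:
#
#     [vmware]
#     host_ip = vcenter.example.com
#     host_username = administrator@vsphere.local
#     host_password = secret
#     cluster_name = TestCluster
#     datastore_regex = nfs.*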
class VMwareVCDriver(driver.ComputeDriver):
    """The VC host connection object."""
    capabilities = {
        "has_imagecache": True,
        "supports_recreate": False,
        "supports_migrate_to_same_host": True
    }
    # Legacy nodename is of the form: <mo id>(<cluster name>)
    # e.g. domain-26(TestCluster)
    # We assume <mo id> consists of alphanumeric, _ and -.
    # We assume cluster name is everything between the first ( and the last ).
    # We pull out <mo id> for re-use.
    LEGACY_NODENAME = re.compile(r'([\w-]+)\(.+\)')
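    # Illustrative example: LEGACY_NODENAME.match('domain-26(TestCluster)')
    # yields group(1) == 'domain-26'.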
    # The vCenter driver includes API that acts on ESX hosts or groups
    # of ESX hosts in clusters or non-cluster logical-groupings.
    #
    # vCenter is not a hypervisor itself, it works with multiple
    # hypervisor host machines and their guests. This fact can
    # subtly alter how vSphere and OpenStack interoperate.
    def __init__(self, virtapi, scheme="https"):
        super(VMwareVCDriver, self).__init__(virtapi)
        if (CONF.vmware.host_ip is None or
            CONF.vmware.host_username is None or
            CONF.vmware.host_password is None):
            raise Exception(_("Must specify host_ip, host_username and "
                              "host_password to use vmwareapi.VMwareVCDriver"))
        self._datastore_regex = None
        if CONF.vmware.datastore_regex:
            try:
                self._datastore_regex = re.compile(CONF.vmware.datastore_regex)
            except re.error:
                raise exception.InvalidInput(reason=
                    _("Invalid Regular Expression %s")
                    % CONF.vmware.datastore_regex)
        self._session = VMwareAPISession(scheme=scheme)
        self._check_min_version()
        # Update the PBM location if necessary
        if CONF.vmware.pbm_enabled:
            self._update_pbm_location()
        self._validate_configuration()
        self._cluster_name = CONF.vmware.cluster_name
        self._cluster_ref = vm_util.get_cluster_ref_by_name(self._session,
                                                            self._cluster_name)
        if self._cluster_ref is None:
            raise exception.NotFound(_("The specified cluster '%s' was not "
                                       "found in vCenter")
                                     % self._cluster_name)
        self._vcenter_uuid = self._get_vcenter_uuid()
        self._nodename = self._create_nodename(self._cluster_ref.value)
        self._volumeops = volumeops.VMwareVolumeOps(self._session,
                                                    self._cluster_ref)
        self._vmops = vmops.VMwareVMOps(self._session,
                                        virtapi,
                                        self._volumeops,
                                        self._cluster_ref,
                                        datastore_regex=self._datastore_regex)
        self._vc_state = host.VCState(self._session,
                                      self._nodename,
                                      self._cluster_ref,
                                      self._datastore_regex)
        # Register the OpenStack extension
        self._register_openstack_extension()
    def _check_min_version(self):
        min_version = utils.convert_version_to_int(constants.MIN_VC_VERSION)
        vc_version = vim_util.get_vc_version(self._session)
        LOG.info(_LI("VMware vCenter version: %s"), vc_version)
        if min_version > utils.convert_version_to_int(vc_version):
            # TODO(garyk): enforce this from M
            LOG.warning(_LW('Running Nova with a VMware vCenter version less '
                            'than %(version)s is deprecated. The required '
                            'minimum version of vCenter will be raised to '
                            '%(version)s in the 13.0.0 release.'),
                        {'version': constants.MIN_VC_VERSION})
    @property
    def need_legacy_block_device_info(self):
        return False
    def _update_pbm_location(self):
        if CONF.vmware.pbm_wsdl_location:
            pbm_wsdl_loc = CONF.vmware.pbm_wsdl_location
        else:
            version = vim_util.get_vc_version(self._session)
            pbm_wsdl_loc = pbm.get_pbm_wsdl_location(version)
        self._session.pbm_wsdl_loc_set(pbm_wsdl_loc)
    def _validate_configuration(self):
        if CONF.vmware.pbm_enabled:
            if not CONF.vmware.pbm_default_policy:
                raise error_util.PbmDefaultPolicyUnspecified()
            if not pbm.get_profile_id_by_name(
                            self._session,
                            CONF.vmware.pbm_default_policy):
                raise error_util.PbmDefaultPolicyDoesNotExist()
            if CONF.vmware.datastore_regex:
                LOG.warning(_LW(
                    "datastore_regex is ignored when PBM is enabled"))
                self._datastore_regex = None
    def init_host(self, host):
        vim = self._session.vim
        if vim is None:
            self._session._create_session()
    def cleanup_host(self, host):
        self._session.logout()
    def _register_openstack_extension(self):
        # Register an 'OpenStack' extension in vCenter
        LOG.debug('Registering extension %s with vCenter',
                  constants.EXTENSION_KEY)
        os_extension = self._session._call_method(vim_util, 'find_extension',
                                                  constants.EXTENSION_KEY)
        if os_extension is None:
            LOG.debug('Extension does not exist. Registering type %s.',
                      constants.EXTENSION_TYPE_INSTANCE)
            self._session._call_method(vim_util, 'register_extension',
                                       constants.EXTENSION_KEY,
                                       constants.EXTENSION_TYPE_INSTANCE)
    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        """Cleanup after instance being destroyed by Hypervisor."""
        pass
    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """resume guest state when a host is booted."""
        # Check if the instance is running already and avoid doing
        # anything if it is.
        state = vm_util.get_vm_state(self._session, instance)
        ignored_states = ['poweredon', 'suspended']
        if state.lower() in ignored_states:
            return
        # Instance is not up and could be in an unknown state.
        # Be as absolute as possible about getting it back into
        # a known and running state.
        self.reboot(context, instance, network_info, 'hard',
                    block_device_info)
    def list_instance_uuids(self):
        """List VM instance UUIDs."""
        return self._vmops.list_instances()
    def list_instances(self):
        """List VM instances from the single compute node."""
        return self._vmops.list_instances()
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        """Transfers the disk of a running instance in multiple phases, turning
        off the instance before the end.
        """
        # TODO(PhilDay): Add support for timeout (clean shutdown)
        return self._vmops.migrate_disk_and_power_off(context, instance,
                                                      dest, flavor)
    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM."""
        self._vmops.confirm_migration(migration, instance, network_info)
    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        """Finish reverting a resize, powering back on the instance."""
        self._vmops.finish_revert_migration(context, instance, network_info,
                                            block_device_info, power_on)
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Completes a resize, turning on the migrated instance."""
        image_meta = objects.ImageMeta.from_dict(image_meta)
        self._vmops.finish_migration(context, migration, instance, disk_info,
                                     network_info, image_meta, resize_instance,
                                     block_device_info, power_on)
    def live_migration(self, context, instance, dest,
                       post_method, recover_method, block_migration=False,
                       migrate_data=None):
        """Live migration of an instance to another host."""
        self._vmops.live_migration(context, instance, dest,
                                   post_method, recover_method,
                                   block_migration)
    def rollback_live_migration_at_destination(self, context, instance,
                                               network_info,
                                               block_device_info,
                                               destroy_disks=True,
                                               migrate_data=None):
        """Clean up destination node after a failed live migration."""
        self.destroy(context, instance, network_info, block_device_info)
    def get_instance_disk_info(self, instance, block_device_info=None):
        pass
    def get_vnc_console(self, context, instance):
        """Return link to instance's VNC console using vCenter logic."""
        # vCenter does not actually run the VNC service
        # itself. You must talk to the VNC host underneath vCenter.
        return self._vmops.get_vnc_console(instance)
    def get_mks_console(self, context, instance):
        return self._vmops.get_mks_console(instance)
    def _get_vcenter_uuid(self):
        """Retrieves the vCenter UUID."""
        about = self._session._call_method(nova_vim_util, 'get_about_info')
        return about.instanceUuid
    def _create_nodename(self, mo_id):
        """Return a nodename which uniquely describes a cluster.
        The name will be of the form:
          <mo id>.<vcenter uuid>
        e.g.
          domain-26.9d51f082-58a4-4449-beed-6fd205a5726b
        """
        return '%s.%s' % (mo_id, self._vcenter_uuid)
    def _get_available_resources(self, host_stats):
        return {'vcpus': host_stats['vcpus'],
               'memory_mb': host_stats['host_memory_total'],
               'local_gb': host_stats['disk_total'],
               'vcpus_used': 0,
               'memory_mb_used': host_stats['host_memory_total'] -
                                 host_stats['host_memory_free'],
               'local_gb_used': host_stats['disk_used'],
               'hypervisor_type': host_stats['hypervisor_type'],
               'hypervisor_version': host_stats['hypervisor_version'],
               'hypervisor_hostname': host_stats['hypervisor_hostname'],
                # The VMware driver manages multiple hosts, so there are
                # likely many different CPU models in use. As such it is
                # impossible to provide any meaningful info on the CPU
                # model of the "host".
               'cpu_info': None,
               'supported_instances': jsonutils.dumps(
                   host_stats['supported_instances']),
               'numa_topology': None,
               }
    def get_available_resource(self, nodename):
        """Retrieve resource info.
        This method is called when nova-compute launches, and
        as part of a periodic task.
        :returns: dictionary describing resources
        """
        host_stats = self._vc_state.get_host_stats(refresh=True)
        stats_dict = self._get_available_resources(host_stats)
        return stats_dict
    def get_available_nodes(self, refresh=False):
        """Returns nodenames of all nodes managed by the compute service.
        This driver supports only one compute node.
        """
        return [self._nodename]
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create VM instance."""
        image_meta = objects.ImageMeta.from_dict(image_meta)
        self._vmops.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info, block_device_info)
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach volume storage to VM instance."""
        return self._volumeops.attach_volume(connection_info, instance)
    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach volume storage to VM instance."""
        return self._volumeops.detach_volume(connection_info, instance)
    def get_volume_connector(self, instance):
        """Return volume connector information."""
        return self._volumeops.get_volume_connector(instance)
    def get_host_ip_addr(self):
        """Returns the IP address of the vCenter host."""
        return CONF.vmware.host_ip
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance."""
        self._vmops.snapshot(context, instance, image_id, update_task_state)
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot VM instance."""
        self._vmops.reboot(instance, network_info, reboot_type)
    def _detach_instance_volumes(self, instance, block_device_info):
        # We need to detach attached volumes
        block_device_mapping = driver.block_device_info_get_mapping(
            block_device_info)
        if block_device_mapping:
            # Certain disk types, for example 'IDE' do not support hot
            # plugging. Hence we need to power off the instance and update
            # the instance state.
            self._vmops.power_off(instance)
            # TODO(garyk): update the volumeops to read the state from the
            # VM instead of relying on an instance flag
            instance.vm_state = vm_states.STOPPED
            for disk in block_device_mapping:
                connection_info = disk['connection_info']
                try:
                    self.detach_volume(connection_info, instance,
                                       disk.get('device_name'))
                except Exception as e:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE("Failed to detach %(device_name)s. "
                                      "Exception: %(exc)s"),
                                  {'device_name': disk.get('device_name'),
                                   'exc': e},
                                  instance=instance)
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None):
        """Destroy VM instance."""
        # Destroy is also triggered when a resource claim in the
        # resource_tracker fails. In that case the node is not set on the
        # instance, so only perform the destroy if the node is set.
        if not instance.node:
            return
        # A resize uses the same instance on the VC. We do not delete that
        # VM in the event of a revert
        if instance.task_state == task_states.RESIZE_REVERTING:
            return
        # We need to detach attached volumes
        if block_device_info is not None:
            try:
                self._detach_instance_volumes(instance, block_device_info)
            except vexc.ManagedObjectNotFoundException:
                LOG.warning(_LW('Instance does not exist. Proceeding to '
                                'delete instance properties on datastore'),
                            instance=instance)
        self._vmops.destroy(instance, destroy_disks)
    def pause(self, instance):
        """Pause VM instance."""
        self._vmops.pause(instance)
    def unpause(self, instance):
        """Unpause paused VM instance."""
        self._vmops.unpause(instance)
    def suspend(self, context, instance):
        """Suspend the specified instance."""
        self._vmops.suspend(instance)
    def resume(self, context, instance, network_info, block_device_info=None):
        """Resume the suspended VM instance."""
        self._vmops.resume(instance)
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Rescue the specified instance."""
        image_meta = objects.ImageMeta.from_dict(image_meta)
        self._vmops.rescue(context, instance, network_info, image_meta)
    def unrescue(self, instance, network_info):
        """Unrescue the specified instance."""
        self._vmops.unrescue(instance)
    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the specified instance."""
        # TODO(PhilDay): Add support for timeout (clean shutdown)
        self._vmops.power_off(instance)
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance."""
        self._vmops.power_on(instance)
    def poll_rebooting_instances(self, timeout, instances):
        """Poll for rebooting instances."""
        self._vmops.poll_rebooting_instances(timeout, instances)
    def get_info(self, instance):
        """Return info about the VM instance."""
        return self._vmops.get_info(instance)
    def get_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        return self._vmops.get_diagnostics(instance)
    def get_instance_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        return self._vmops.get_instance_diagnostics(instance)
    def host_power_action(self, action):
        """Host operations not supported by VC driver.
        This needs to override the ESX driver implementation.
        """
        raise NotImplementedError()
    def host_maintenance_mode(self, host, mode):
        """Host operations not supported by VC driver.
        This needs to override the ESX driver implementation.
        """
        raise NotImplementedError()
    def set_host_enabled(self, enabled):
        """Host operations not supported by VC driver.
        This needs to override the ESX driver implementation.
        """
        raise NotImplementedError()
    def get_host_uptime(self):
        """Host uptime operation not supported by VC driver."""
        msg = _("Multiple hosts may be managed by the VMWare "
                "vCenter driver; therefore we do not return "
                "uptime for just one host.")
        raise NotImplementedError(msg)
    def inject_network_info(self, instance, nw_info):
        """inject network info for specified instance."""
        self._vmops.inject_network_info(instance, nw_info)
    def manage_image_cache(self, context, all_instances):
        """Manage the local cache of images."""
        self._vmops.manage_image_cache(context, all_instances)
    def instance_exists(self, instance):
        """Efficient override of base instance_exists method."""
        return self._vmops.instance_exists(instance)
    def attach_interface(self, instance, image_meta, vif):
        """Attach an interface to the instance."""
        image_meta = objects.ImageMeta.from_dict(image_meta)
        self._vmops.attach_interface(instance, image_meta, vif)
    def detach_interface(self, instance, vif):
        """Detach an interface from the instance."""
        self._vmops.detach_interface(instance, vif)
class VMwareAPISession(api.VMwareAPISession):
    """Sets up a session with the VC/ESX host and handles all
    the calls made to the host.
    """
    def __init__(self, host_ip=CONF.vmware.host_ip,
                 host_port=CONF.vmware.host_port,
                 username=CONF.vmware.host_username,
                 password=CONF.vmware.host_password,
                 retry_count=CONF.vmware.api_retry_count,
                 scheme="https",
                 cacert=CONF.vmware.ca_file,
                 insecure=CONF.vmware.insecure):
        super(VMwareAPISession, self).__init__(
                host=host_ip,
                port=host_port,
                server_username=username,
                server_password=password,
                api_retry_count=retry_count,
                task_poll_interval=CONF.vmware.task_poll_interval,
                scheme=scheme,
                create_session=True,
                wsdl_loc=CONF.vmware.wsdl_location,
                cacert=cacert,
                insecure=insecure)
    def _is_vim_object(self, module):
        """Check if the module is a VIM Object instance."""
        return isinstance(module, vim.Vim)
    def _call_method(self, module, method, *args, **kwargs):
        """Calls a method within the module specified with
        args provided.
        """
        if not self._is_vim_object(module):
            return self.invoke_api(module, method, self.vim, *args, **kwargs)
        else:
            return self.invoke_api(module, method, *args, **kwargs)
    def _wait_for_task(self, task_ref):
        """Return a Deferred that will give the result of the given task.
        The task is polled until it completes.
        """
        return self.wait_for_task(task_ref)
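# Illustration of the VMwareAPISession._call_method() dispatch above, using
# calls that appear elsewhere in this module: a plain helper module such as
# vim_util is invoked with self.vim inserted as the first API argument, while
# a Vim object is passed straight through to invoke_api, e.g.
#     session._call_method(vim_util, 'find_extension', constants.EXTENSION_KEY)
#     session._call_method(nova_vim_util, 'get_about_info')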
 | |
| 
	# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from selenium.common.exceptions import (
    NoSuchElementException,
    NoSuchWindowException,
    TimeoutException,
    WebDriverException)
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def testGetTitle(driver, pages):
    pages.load("simpleTest.html")
    title = driver.title
    assert "Hello WebDriver" == title
def testGetCurrentUrl(driver, pages, webserver):
    pages.load("simpleTest.html")
    url = driver.current_url
    assert webserver.where_is('simpleTest.html') == url
def testFindElementsByXPath(driver, pages):
    pages.load("simpleTest.html")
    elem = driver.find_element_by_xpath("//h1")
    assert "Heading" == elem.text
def testFindElementByXpathThrowNoSuchElementException(driver, pages):
    pages.load("simpleTest.html")
    with pytest.raises(NoSuchElementException):
        driver.find_element_by_xpath("//h4")
def testFindElementsByXpath(driver, pages):
    pages.load("nestedElements.html")
    elems = driver.find_elements_by_xpath("//option")
    assert 48 == len(elems)
    assert "One" == elems[0].get_attribute("value")
def testFindElementsByName(driver, pages):
    pages.load("xhtmlTest.html")
    elem = driver.find_element_by_name("windowOne")
    assert "Open new window" == elem.text
def testFindElementsByNameInElementContext(driver, pages):
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("form2")
    sub_elem = elem.find_element_by_name("selectomatic")
    assert "2" == sub_elem.get_attribute("id")
def testFindElementsByLinkTextInElementContext(driver, pages):
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("div1")
    sub_elem = elem.find_element_by_link_text("hello world")
    assert "link1" == sub_elem.get_attribute("name")
def testFindElementByIdInElementContext(driver, pages):
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("form2")
    sub_elem = elem.find_element_by_id("2")
    assert "selectomatic" == sub_elem.get_attribute("name")
def testFindElementByXpathInElementContext(driver, pages):
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("form2")
    sub_elem = elem.find_element_by_xpath("select")
    assert "2" == sub_elem.get_attribute("id")
def testFindElementByXpathInElementContextNotFound(driver, pages):
    pages.load("nestedElements.html")
    elem = driver.find_element_by_name("form2")
    with pytest.raises(NoSuchElementException):
        elem.find_element_by_xpath("div")
def testShouldBeAbleToEnterDataIntoFormFields(driver, pages):
    pages.load("xhtmlTest.html")
    elem = driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']")
    elem.clear()
    elem.send_keys("some text")
    elem = driver.find_element_by_xpath("//form[@name='someForm']/input[@id='username']")
    assert "some text" == elem.get_attribute("value")
def testFindElementByTagName(driver, pages):
    pages.load("simpleTest.html")
    elems = driver.find_elements_by_tag_name("div")
    num_by_xpath = len(driver.find_elements_by_xpath("//div"))
    assert num_by_xpath == len(elems)
    elems = driver.find_elements_by_tag_name("iframe")
    assert 0 == len(elems)
def testFindElementByTagNameWithinElement(driver, pages):
    pages.load("simpleTest.html")
    div = driver.find_element_by_id("multiline")
    elems = div.find_elements_by_tag_name("p")
    assert len(elems) == 1
@pytest.mark.xfail_marionette(
    reason="W3C implementations can't switch to a window by name",
    raises=TimeoutException,
    run=False)
def testSwitchToWindow(driver, pages):
    title_1 = "XHTML Test Page"
    title_2 = "We Arrive Here"
    switch_to_window_timeout = 5
    wait = WebDriverWait(driver, switch_to_window_timeout, ignored_exceptions=[NoSuchWindowException])
    pages.load("xhtmlTest.html")
    driver.find_element_by_link_text("Open new window").click()
    assert title_1 == driver.title
    wait.until(lambda dr: dr.switch_to.window("result") is None)
    assert title_2 == driver.title
def testSwitchFrameByName(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_name("third"))
    checkbox = driver.find_element_by_id("checky")
    checkbox.click()
    checkbox.submit()
def testIsEnabled(driver, pages):
    pages.load("formPage.html")
    elem = driver.find_element_by_xpath("//input[@id='working']")
    assert elem.is_enabled()
    elem = driver.find_element_by_xpath("//input[@id='notWorking']")
    assert not elem.is_enabled()
def testIsSelectedAndToggle(driver, pages):
    pages.load("formPage.html")
    elem = driver.find_element_by_id("multi")
    option_elems = elem.find_elements_by_xpath("option")
    assert option_elems[0].is_selected()
    option_elems[0].click()
    assert not option_elems[0].is_selected()
    option_elems[0].click()
    assert option_elems[0].is_selected()
    assert option_elems[2].is_selected()
def testNavigate(driver, pages):
    pages.load("formPage.html")
    driver.find_element_by_id("imageButton").submit()
    WebDriverWait(driver, 3).until(EC.title_is("We Arrive Here"))
    driver.back()
    assert "We Leave From Here" == driver.title
    driver.forward()
    assert "We Arrive Here" == driver.title
def testGetAttribute(driver, pages):
    url = pages.url('xhtmlTest.html')
    driver.get(url)
    elem = driver.find_element_by_id("id1")
    attr = elem.get_attribute("href")
    assert '{0}#'.format(url) == attr
def testGetImplicitAttribute(driver, pages):
    pages.load("nestedElements.html")
    elems = driver.find_elements_by_xpath("//option")
    assert len(elems) >= 3
    for i, elem in enumerate(elems[:3]):
        assert i == int(elem.get_attribute("index"))
def testExecuteSimpleScript(driver, pages):
    pages.load("xhtmlTest.html")
    title = driver.execute_script("return document.title;")
    assert "XHTML Test Page" == title
def testExecuteScriptAndReturnElement(driver, pages):
    pages.load("xhtmlTest.html")
    elem = driver.execute_script("return document.getElementById('id1');")
    assert "WebElement" in str(type(elem))
def testExecuteScriptWithArgs(driver, pages):
    pages.load("xhtmlTest.html")
    result = driver.execute_script("return arguments[0] == 'fish' ? 'fish' : 'not fish';", "fish")
    assert "fish" == result
def testExecuteScriptWithMultipleArgs(driver, pages):
    pages.load("xhtmlTest.html")
    result = driver.execute_script(
        "return arguments[0] + arguments[1]", 1, 2)
    assert 3 == result
def testExecuteScriptWithElementArgs(driver, pages):
    pages.load("javascriptPage.html")
    button = driver.find_element_by_id("plainButton")
    result = driver.execute_script("arguments[0]['flibble'] = arguments[0].getAttribute('id'); return arguments[0]['flibble'];", button)
    assert "plainButton" == result
def testFindElementsByPartialLinkText(driver, pages):
    pages.load("xhtmlTest.html")
    elem = driver.find_element_by_partial_link_text("new window")
    elem.click()
def testIsElementDisplayed(driver, pages):
    pages.load("javascriptPage.html")
    visible = driver.find_element_by_id("displayed").is_displayed()
    not_visible = driver.find_element_by_id("hidden").is_displayed()
    assert visible
    assert not not_visible
def testMoveWindowPosition(driver, pages):
    pages.load("blank.html")
    loc = driver.get_window_position()
    # note: can't test 0,0 since some OSes don't allow that location
    # because of system toolbars
    new_x = 50
    new_y = 50
    if loc['x'] == new_x:
        new_x += 10
    if loc['y'] == new_y:
        new_y += 10
    driver.set_window_position(new_x, new_y)
    loc = driver.get_window_position()
    assert loc['x'] == new_x
    assert loc['y'] == new_y
def testChangeWindowSize(driver, pages):
    pages.load("blank.html")
    size = driver.get_window_size()
    newSize = [600, 600]
    if size['width'] == 600:
        newSize[0] = 500
    if size['height'] == 600:
        newSize[1] = 500
    driver.set_window_size(newSize[0], newSize[1])
    size = driver.get_window_size()
    assert size['width'] == newSize[0]
    assert size['height'] == newSize[1]
@pytest.mark.xfail_chrome(raises=WebDriverException)
@pytest.mark.xfail_marionette(raises=WebDriverException)
def testGetLogTypes(driver, pages):
    pages.load("blank.html")
    assert isinstance(driver.log_types, list)
@pytest.mark.xfail_chrome(raises=WebDriverException)
@pytest.mark.xfail_marionette(raises=WebDriverException)
def testGetLog(driver, pages):
    pages.load("blank.html")
    for log_type in driver.log_types:
        log = driver.get_log(log_type)
        assert isinstance(log, list)
 | |
| 
	"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
import os
import sys
from optparse import make_option, OptionParser
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
try:
    set
except NameError:
    from sets import Set as set     # For Python 2.3
class CommandError(Exception):
    """
    Exception class indicating a problem while executing a management
    command.
    If this exception is raised during the execution of a management
    command, it will be caught and turned into a nicely-printed error
    message to the appropriate output stream (i.e., stderr); as a
    result, raising this exception (with a sensible description of the
    error) is the preferred way to indicate that something has gone
    wrong in the execution of a command.
    
    """
    pass
def handle_default_options(options):
    """
    Include any default options that all commands should accept here
    so that ManagementUtility can handle them before searching for
    user commands.
    
    """
    if options.settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
    if options.pythonpath:
        sys.path.insert(0, options.pythonpath)
class BaseCommand(object):
    """
    The base class from which all management commands ultimately
    derive.
    Use this class if you want access to all of the mechanisms which
    parse the command-line arguments and work out what code to call in
    response; if you don't need to change any of that behavior,
    consider using one of the subclasses defined in this file.
    If you are interested in overriding/customizing various aspects of
    the command-parsing and -execution behavior, the normal flow works
    as follows:
    1. ``django-admin.py`` or ``manage.py`` loads the command class
       and calls its ``run_from_argv()`` method.
    2. The ``run_from_argv()`` method calls ``create_parser()`` to get
       an ``OptionParser`` for the arguments, parses them, performs
       any environment changes requested by options like
       ``pythonpath``, and then calls the ``execute()`` method,
       passing the parsed arguments.
    3. The ``execute()`` method attempts to carry out the command by
       calling the ``handle()`` method with the parsed arguments; any
       output produced by ``handle()`` will be printed to standard
       output and, if the command is intended to produce a block of
       SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
    4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
       instead print an error message to ``stderr``.
    Thus, the ``handle()`` method is typically the starting point for
    subclasses; many built-in commands and command types either place
    all of their logic in ``handle()``, or perform some additional
    parsing work in ``handle()`` and then delegate from it to more
    specialized methods as needed.
    Several attributes affect behavior at various steps along the way:
    
    ``args``
        A string listing the arguments accepted by the command,
        suitable for use in help messages; e.g., a command which takes
        a list of application names might set this to '<appname
        appname ...>'.
    ``can_import_settings``
        A boolean indicating whether the command needs to be able to
        import Django settings; if ``True``, ``execute()`` will verify
        that this is possible before proceeding. Default value is
        ``True``.
    ``help``
        A short description of the command, which will be printed in
        help messages.
    ``option_list``
        This is the list of ``optparse`` options which will be fed
        into the command's ``OptionParser`` for parsing arguments.
    ``output_transaction``
        A boolean indicating whether the command outputs SQL
        statements; if ``True``, the output will automatically be
        wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
        ``False``.
    ``requires_model_validation``
        A boolean; if ``True``, validation of installed models will be
        performed prior to executing the command. Default value is
        ``True``. To validate an individual application's models
        rather than all applications' models, call
        ``self.validate(app)`` from ``handle()``, where ``app`` is the
        application's Python module.
    
    """
    # Metadata about this command.
    option_list = (
        make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
            type='choice', choices=['0', '1', '2'],
            help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
        make_option('--settings',
            help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
        make_option('--pythonpath',
            help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
        make_option('--traceback', action='store_true',
            help='Print traceback on exception'),
    )
    help = ''
    args = ''
    # Configuration shortcuts that alter various logic.
    can_import_settings = True
    requires_model_validation = True
    output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
    def __init__(self):
        self.style = color_style()
    def get_version(self):
        """
        Return the Django version, which should be correct for all
        built-in Django commands. User-supplied commands should
        override this method.
        
        """
        return django.get_version()
    def usage(self, subcommand):
        """
        Return a brief description of how to use this command, by
        default from the attribute ``self.help``.
        
        """
        usage = '%%prog %s [options] %s' % (subcommand, self.args)
        if self.help:
            return '%s\n\n%s' % (usage, self.help)
        else:
            return usage
    def create_parser(self, prog_name, subcommand):
        """
        Create and return the ``OptionParser`` which will be used to
        parse the arguments to this command.
        
        """
        return OptionParser(prog=prog_name,
                            usage=self.usage(subcommand),
                            version=self.get_version(),
                            option_list=self.option_list)
    def print_help(self, prog_name, subcommand):
        """
        Print the help message for this command, derived from
        ``self.usage()``.
        
        """
        parser = self.create_parser(prog_name, subcommand)
        parser.print_help()
    def run_from_argv(self, argv):
        """
        Set up any environment changes requested (e.g., Python path
        and Django settings), then run this command.
        
        """
        parser = self.create_parser(argv[0], argv[1])
        options, args = parser.parse_args(argv[2:])
        handle_default_options(options)
        self.execute(*args, **options.__dict__)
    def execute(self, *args, **options):
        """
        Try to execute this command, performing model validation if
        needed (as controlled by the attribute
        ``self.requires_model_validation``). If the command raises a
        ``CommandError``, intercept it and print it sensibly to
        stderr.
        
        """
        # Switch to English, because django-admin.py creates database content
        # like permissions, and those shouldn't contain any translations.
        # But only do this if we can assume we have a working settings file,
        # because django.utils.translation requires settings.
        if self.can_import_settings:
            try:
                from django.utils import translation
                translation.activate('en-us')
            except ImportError, e:
                # If settings should be available, but aren't,
                # raise the error and quit.
                sys.stderr.write(self.style.ERROR(str('Error: %s\n' % e)))
                sys.exit(1)
        try:
            if self.requires_model_validation:
                self.validate()
            output = self.handle(*args, **options)
            if output:
                if self.output_transaction:
                    # This needs to be imported here, because it relies on settings.
                    from django.db import connection
                    if connection.ops.start_transaction_sql():
                        print self.style.SQL_KEYWORD(connection.ops.start_transaction_sql())
                print output
                if self.output_transaction:
                    print self.style.SQL_KEYWORD("COMMIT;")
        except CommandError, e:
            sys.stderr.write(self.style.ERROR(str('Error: %s\n' % e)))
            sys.exit(1)
    def validate(self, app=None, display_num_errors=False):
        """
        Validates the given app, raising CommandError for any errors.
        
        If app is None, then this will validate all installed apps.
        
        """
        from django.core.management.validation import get_validation_errors
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        s = StringIO()
        num_errors = get_validation_errors(s, app)
        if num_errors:
            s.seek(0)
            error_text = s.read()
            raise CommandError("One or more models did not validate:\n%s" % error_text)
        if display_num_errors:
            print "%s error%s found" % (num_errors, num_errors != 1 and 's' or '')
    def handle(self, *args, **options):
        """
        The actual logic of the command. Subclasses must implement
        this method.
        
        """
        raise NotImplementedError()
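# A minimal sketch (not part of Django) of a custom command following the flow
# described in BaseCommand's docstring: run_from_argv() parses the options,
# execute() calls handle(), and a raised CommandError becomes a clean message
# on stderr.
class _GreetCommand(BaseCommand):
    help = 'Prints a greeting for each name given on the command line.'
    args = '<name name ...>'

    def handle(self, *args, **options):
        if not args:
            raise CommandError('Enter at least one name.')
        return '\n'.join(['Hello, %s' % name for name in args])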
class AppCommand(BaseCommand):
    """
    A management command which takes one or more installed application
    names as arguments, and does something with each of them.
    Rather than implementing ``handle()``, subclasses must implement
    ``handle_app()``, which will be called once for each application.
    
    """
    args = '<appname appname ...>'
    def handle(self, *app_labels, **options):
        from django.db import models
        if not app_labels:
            raise CommandError('Enter at least one appname.')
        try:
            app_list = [models.get_app(app_label) for app_label in app_labels]
        except (ImproperlyConfigured, ImportError), e:
            raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
        output = []
        for app in app_list:
            app_output = self.handle_app(app, **options)
            if app_output:
                output.append(app_output)
        return '\n'.join(output)
    def handle_app(self, app, **options):
        """
        Perform the command's actions for ``app``, which will be the
        Python module corresponding to an application name given on
        the command line.
        
        """
        raise NotImplementedError()
class LabelCommand(BaseCommand):
    """
    A management command which takes one or more arbitrary arguments
    (labels) on the command line, and does something with each of
    them.
    Rather than implementing ``handle()``, subclasses must implement
    ``handle_label()``, which will be called once for each label.
    If the arguments should be names of installed applications, use
    ``AppCommand`` instead.
    
    """
    args = '<label label ...>'
    label = 'label'
    def handle(self, *labels, **options):
        if not labels:
            raise CommandError('Enter at least one %s.' % self.label)
        output = []
        for label in labels:
            label_output = self.handle_label(label, **options)
            if label_output:
                output.append(label_output)
        return '\n'.join(output)
    def handle_label(self, label, **options):
        """
        Perform the command's actions for ``label``, which will be the
        string as given on the command line.
        
        """
        raise NotImplementedError()
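# A minimal sketch (not part of Django) of a LabelCommand subclass:
# handle_label() is called once per command-line argument and the returned
# strings are joined into the command's output.
class _EchoLabelCommand(LabelCommand):
    help = 'Echoes each label given on the command line.'
    label = 'label'

    def handle_label(self, label, **options):
        return 'Got label: %s' % label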
class NoArgsCommand(BaseCommand):
    """
    A command which takes no arguments on the command line.
    Rather than implementing ``handle()``, subclasses must implement
    ``handle_noargs()``; ``handle()`` itself is overridden to ensure
    no arguments are passed to the command.
    Attempting to pass arguments will raise ``CommandError``.
    
    """
    args = ''
    def handle(self, *args, **options):
        if args:
            raise CommandError("Command doesn't accept any arguments")
        return self.handle_noargs(**options)
    def handle_noargs(self, **options):
        """
        Perform this command's actions.
        
        """
        raise NotImplementedError()
def copy_helper(style, app_or_project, name, directory, other_name=''):
    """
    Copies either a Django application layout template or a Django project
    layout template into the specified directory.
    """
    # style -- A color style object (see django.core.management.color).
    # app_or_project -- The string 'app' or 'project'.
    # name -- The name of the application or project.
    # directory -- The directory to which the layout template should be copied.
    # other_name -- When copying an application layout, this should be the name
    #               of the project.
    import re
    import shutil
    other = {'project': 'app', 'app': 'project'}[app_or_project]
    if not re.search(r'^[_a-zA-Z]\w*$', name): # If it's not a valid directory name.
        # Provide a smart error message, depending on the error.
        if not re.search(r'^[_a-zA-Z]', name):
            message = 'make sure the name begins with a letter or underscore'
        else:
            message = 'use only numbers, letters and underscores'
        raise CommandError("%r is not a valid %s name. Please %s." % (name, app_or_project, message))
    top_dir = os.path.join(directory, name)
    try:
        os.mkdir(top_dir)
    except OSError, e:
        raise CommandError(e)
    # Determine where the app or project templates are. Use
    # django.__path__[0] because we don't know into which directory
    # django has been installed.
    template_dir = os.path.join(django.__path__[0], 'conf', '%s_template' % app_or_project)
    for d, subdirs, files in os.walk(template_dir):
        relative_dir = d[len(template_dir)+1:].replace('%s_name' % app_or_project, name)
        if relative_dir:
            os.mkdir(os.path.join(top_dir, relative_dir))
        # Prune hidden directories in place so that os.walk() skips them.
        subdirs[:] = [s for s in subdirs if not s.startswith('.')]
        for f in files:
            if f.endswith('.pyc'):
                continue
            path_old = os.path.join(d, f)
            path_new = os.path.join(top_dir, relative_dir, f.replace('%s_name' % app_or_project, name))
            fp_old = open(path_old, 'r')
            fp_new = open(path_new, 'w')
            fp_new.write(fp_old.read().replace('{{ %s_name }}' % app_or_project, name).replace('{{ %s_name }}' % other, other_name))
            fp_old.close()
            fp_new.close()
            try:
                shutil.copymode(path_old, path_new)
                _make_writeable(path_new)
            except OSError:
                sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
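# Illustrative (hypothetical paths/names) use of copy_helper() above: copy the
# app layout template into an existing project directory as an application
# named 'polls' belonging to the project 'myproject':
#     copy_helper(color_style(), 'app', 'polls', '/home/user/myproject', 'myproject')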
def _make_writeable(filename):
    """
    Make sure that the file is writeable. Useful if our source is
    read-only.
    
    """
    import stat
    if sys.platform.startswith('java'):
        # On Jython there is no os.access()
        return
    if not os.access(filename, os.W_OK):
        st = os.stat(filename)
        new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
        os.chmod(filename, new_permissions)
 | |
| 
	# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.go import target_type_rules
from pants.backend.go.goals.test import GoTestFieldSet
from pants.backend.go.goals.test import rules as _test_rules
from pants.backend.go.target_types import GoModTarget, GoPackageTarget
from pants.backend.go.util_rules import (
    assembly,
    build_pkg,
    build_pkg_target,
    first_party_pkg,
    go_mod,
    link,
    sdk,
    tests_analysis,
    third_party_pkg,
)
from pants.backend.go.util_rules.embedcfg import EmbedConfig
from pants.build_graph.address import Address
from pants.core.goals.test import TestResult
from pants.core.target_types import ResourceTarget
from pants.core.util_rules import source_files
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    rule_runner = RuleRunner(
        rules=[
            *_test_rules(),
            *assembly.rules(),
            *build_pkg.rules(),
            *build_pkg_target.rules(),
            *first_party_pkg.rules(),
            *go_mod.rules(),
            *link.rules(),
            *sdk.rules(),
            *target_type_rules.rules(),
            *tests_analysis.rules(),
            *third_party_pkg.rules(),
            *source_files.rules(),
            QueryRule(TestResult, [GoTestFieldSet]),
        ],
        target_types=[GoModTarget, GoPackageTarget, ResourceTarget],
    )
    rule_runner.set_options(["--go-test-args=-v -bench=."], env_inherit={"PATH"})
    return rule_runner
def test_merge_embedcfg() -> None:
    x = EmbedConfig(
        patterns={
            "*.go": ["foo.go", "bar.go"],
            "*.x": ["only_in_x"],
        },
        files={"foo.go": "path/to/foo.go", "bar.go": "path/to/bar.go", "only_in_x": "only_in_x"},
    )
    y = EmbedConfig(
        patterns={
            "*.go": ["foo.go", "bar.go"],
            "*.y": ["only_in_y"],
        },
        files={"foo.go": "path/to/foo.go", "bar.go": "path/to/bar.go", "only_in_y": "only_in_y"},
    )
    merged = x.merge(y)
    assert merged == EmbedConfig(
        patterns={
            "*.go": ["foo.go", "bar.go"],
            "*.x": ["only_in_x"],
            "*.y": ["only_in_y"],
        },
        files={
            "foo.go": "path/to/foo.go",
            "bar.go": "path/to/bar.go",
            "only_in_x": "only_in_x",
            "only_in_y": "only_in_y",
        },
    )
    a = EmbedConfig(
        patterns={
            "*.go": ["foo.go"],
        },
        files={"foo.go": "path/to/foo.go"},
    )
    b = EmbedConfig(
        patterns={
            "*.go": ["bar.go"],
        },
        files={"bar.go": "path/to/bar.go"},
    )
    with pytest.raises(AssertionError):
        _ = a.merge(b)
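# As the test above demonstrates, EmbedConfig.merge() unions the pattern map
# and the file map of both configs, while merging configs that map the same
# pattern to different file lists is rejected (AssertionError).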
def test_embed_in_source_code(rule_runner: RuleRunner) -> None:
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """
                go_mod(name='mod')
                go_package(name='pkg', dependencies=[":hello"])
                resource(name='hello', source='hello.txt')
                """
            ),
            "go.mod": dedent(
                """\
                module go.example.com/foo
                go 1.17
                """
            ),
            "hello.txt": "hello",
            "foo.go": dedent(
                """\
                package foo
                import _ "embed"
                //go:embed hello.txt
                var message string
                """
            ),
            "foo_test.go": dedent(
                """\
                package foo
                import "testing"
                func TestFoo(t *testing.T) {
                  if message != "hello" {
                    t.Fatalf("message mismatch: want=%s; got=%s", "hello", message)
                  }
                }
                """
            ),
        }
    )
    tgt = rule_runner.get_target(Address("", target_name="pkg"))
    result = rule_runner.request(TestResult, [GoTestFieldSet.create(tgt)])
    assert result.exit_code == 0
def test_embed_in_internal_test(rule_runner: RuleRunner) -> None:
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """
                go_mod(name='mod')
                go_package(name='pkg', dependencies=[":hello"])
                resource(name='hello', source='hello.txt')
                """
            ),
            "go.mod": dedent(
                """\
                module go.example.com/foo
                go 1.17
                """
            ),
            "hello.txt": "hello",
            "foo.go": dedent(
                """\
                package foo
                """
            ),
            "foo_test.go": dedent(
                """\
                package foo
                import (
                  _ "embed"
                  "testing"
                )
                //go:embed hello.txt
                var testMessage string
                func TestFoo(t *testing.T) {
                  if testMessage != "hello" {
                    t.Fatalf("testMessage mismatch: want=%s; got=%s", "hello", testMessage)
                  }
                }
                """
            ),
        }
    )
    tgt = rule_runner.get_target(Address("", target_name="pkg"))
    result = rule_runner.request(TestResult, [GoTestFieldSet.create(tgt)])
    assert result.exit_code == 0
def test_embed_in_external_test(rule_runner: RuleRunner) -> None:
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """
                go_mod(name='mod')
                go_package(name='pkg', dependencies=[":hello"])
                resource(name='hello', source='hello.txt')
                """
            ),
            "go.mod": dedent(
                """\
                module go.example.com/foo
                go 1.17
                """
            ),
            "hello.txt": "hello",
            "foo.go": dedent(
                """\
                package foo
                """
            ),
            "bar_test.go": dedent(
                """\
                package foo_test
                import (
                  _ "embed"
                  "testing"
                )
                //go:embed hello.txt
                var testMessage string
                func TestBar(t *testing.T) {
                  if testMessage != "hello" {
                    t.Fatalf("testMessage mismatch: want=%s; got=%s", "hello", testMessage)
                  }
                }
                """
            ),
        }
    )
    tgt = rule_runner.get_target(Address("", target_name="pkg"))
    result = rule_runner.request(TestResult, [GoTestFieldSet.create(tgt)])
    assert result.exit_code == 0
 | |
| 
	#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
import mock
from oslo_serialization import jsonutils
import six
import testscenarios
import webob
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import extensions
from nova.api.openstack import versioned_method
from nova.api.openstack import wsgi
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
from nova.tests.unit import utils
class MicroversionedTest(testscenarios.WithScenarios, test.NoDBTestCase):
    scenarios = [
        ('legacy-microversion', {
            'header_name': 'X-OpenStack-Nova-API-Version',
        }),
        ('modern-microversion', {
            'header_name': 'OpenStack-API-Version',
        })
    ]
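    # The legacy Nova header carries the bare version string, while the modern
    # OpenStack-API-Version header prefixes it with the service name ("compute").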
    def _make_microversion_header(self, value):
        if 'nova' in self.header_name.lower():
            return {self.header_name: value}
        else:
            return {self.header_name: 'compute %s' % value}
class RequestTest(MicroversionedTest):
    def setUp(self):
        super(RequestTest, self).setUp()
        self.stub_out('nova.i18n.get_available_languages',
                      lambda *args, **kwargs:
                      ['en_GB', 'en_AU', 'de', 'zh_CN', 'en_US'])
    def test_content_type_missing(self):
        request = wsgi.Request.blank('/tests/123', method='POST')
        request.body = b"<body />"
        self.assertIsNone(request.get_content_type())
    def test_content_type_unsupported(self):
        request = wsgi.Request.blank('/tests/123', method='POST')
        request.headers["Content-Type"] = "text/html"
        request.body = b"asdf<br />"
        self.assertRaises(exception.InvalidContentType,
                          request.get_content_type)
    def test_content_type_with_charset(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Content-Type"] = "application/json; charset=UTF-8"
        result = request.get_content_type()
        self.assertEqual(result, "application/json")
    def test_content_type_accept_default(self):
        request = wsgi.Request.blank('/tests/123.unsupported')
        request.headers["Accept"] = "application/unsupported1"
        result = request.best_match_content_type()
        self.assertEqual(result, "application/json")
    def test_cache_and_retrieve_instances(self):
        request = wsgi.Request.blank('/foo')
        instances = []
        for x in range(3):
            instances.append({'uuid': 'uuid%s' % x})
        # Store 2
        request.cache_db_instances(instances[:2])
        # Store 1
        request.cache_db_instance(instances[2])
        self.assertEqual(request.get_db_instance('uuid0'),
                instances[0])
        self.assertEqual(request.get_db_instance('uuid1'),
                instances[1])
        self.assertEqual(request.get_db_instance('uuid2'),
                instances[2])
        self.assertIsNone(request.get_db_instance('uuid3'))
        self.assertEqual(request.get_db_instances(),
                {'uuid0': instances[0],
                 'uuid1': instances[1],
                 'uuid2': instances[2]})
    def test_from_request(self):
        request = wsgi.Request.blank('/')
        accepted = 'bogus;q=1.1, en-gb;q=0.7,en-us,en;q=.5,*;q=.7'
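        # 'en-us' has an implicit q of 1.0, the highest among the available
        # languages stubbed out in setUp, so the best match should be en_US.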
        request.headers = {'Accept-Language': accepted}
        self.assertEqual(request.best_match_language(), 'en_US')
    def test_asterisk(self):
        # asterisk should match first available if there
        # are not any other available matches
        request = wsgi.Request.blank('/')
        accepted = '*,es;q=.5'
        request.headers = {'Accept-Language': accepted}
        self.assertEqual(request.best_match_language(), 'en_GB')
    def test_prefix(self):
        request = wsgi.Request.blank('/')
        accepted = 'zh'
        request.headers = {'Accept-Language': accepted}
        self.assertEqual(request.best_match_language(), 'zh_CN')
    def test_secondary(self):
        request = wsgi.Request.blank('/')
        accepted = 'nn,en-gb;q=.5'
        request.headers = {'Accept-Language': accepted}
        self.assertEqual(request.best_match_language(), 'en_GB')
    def test_none_found(self):
        request = wsgi.Request.blank('/')
        accepted = 'nb-no'
        request.headers = {'Accept-Language': accepted}
        self.assertIsNone(request.best_match_language())
    def test_no_lang_header(self):
        request = wsgi.Request.blank('/')
        accepted = ''
        request.headers = {'Accept-Language': accepted}
        self.assertIsNone(request.best_match_language())
    def test_api_version_request_header_none(self):
        request = wsgi.Request.blank('/')
        request.set_api_version_request()
        self.assertEqual(api_version.APIVersionRequest(
            api_version.DEFAULT_API_VERSION), request.api_version_request)
    @mock.patch("nova.api.openstack.api_version_request.max_api_version")
    def test_api_version_request_header(self, mock_maxver):
        mock_maxver.return_value = api_version.APIVersionRequest("2.14")
        request = wsgi.Request.blank('/')
        request.headers = self._make_microversion_header('2.14')
        request.set_api_version_request()
        self.assertEqual(api_version.APIVersionRequest("2.14"),
                         request.api_version_request)
    @mock.patch("nova.api.openstack.api_version_request.max_api_version")
    def test_api_version_request_header_latest(self, mock_maxver):
        mock_maxver.return_value = api_version.APIVersionRequest("3.5")
        request = wsgi.Request.blank('/')
        request.headers = self._make_microversion_header('latest')
        request.set_api_version_request()
        self.assertEqual(api_version.APIVersionRequest("3.5"),
                         request.api_version_request)
    def test_api_version_request_header_invalid(self):
        request = wsgi.Request.blank('/')
        request.headers = self._make_microversion_header('2.1.3')
        self.assertRaises(exception.InvalidAPIVersionString,
                          request.set_api_version_request)
class ActionDispatcherTest(test.NoDBTestCase):
    def test_dispatch(self):
        serializer = wsgi.ActionDispatcher()
        serializer.create = lambda x: 'pants'
        self.assertEqual(serializer.dispatch({}, action='create'), 'pants')
    def test_dispatch_action_None(self):
        serializer = wsgi.ActionDispatcher()
        serializer.create = lambda x: 'pants'
        serializer.default = lambda x: 'trousers'
        self.assertEqual(serializer.dispatch({}, action=None), 'trousers')
    def test_dispatch_default(self):
        serializer = wsgi.ActionDispatcher()
        serializer.create = lambda x: 'pants'
        serializer.default = lambda x: 'trousers'
        self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
class JSONDictSerializerTest(test.NoDBTestCase):
    def test_json(self):
        input_dict = dict(servers=dict(a=(2, 3)))
        expected_json = '{"servers":{"a":[2,3]}}'
        serializer = wsgi.JSONDictSerializer()
        result = serializer.serialize(input_dict)
        result = result.replace('\n', '').replace(' ', '')
        self.assertEqual(result, expected_json)
class JSONDeserializerTest(test.NoDBTestCase):
    def test_json(self):
        data = """{"a": {
                "a1": "1",
                "a2": "2",
                "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
                "d": {"e": "1"},
                "f": "1"}}"""
        as_dict = {
            'body': {
                'a': {
                    'a1': '1',
                    'a2': '2',
                    'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
                    'd': {'e': '1'},
                    'f': '1',
                },
            },
        }
        deserializer = wsgi.JSONDeserializer()
        self.assertEqual(deserializer.deserialize(data), as_dict)
    def test_json_valid_utf8(self):
        data = b"""{"server": {"min_count": 1, "flavorRef": "1",
                "name": "\xe6\xa6\x82\xe5\xbf\xb5",
                "imageRef": "10bab10c-1304-47d",
                "max_count": 1}} """
        as_dict = {
            'body': {
                u'server': {
                            u'min_count': 1, u'flavorRef': u'1',
                            u'name': u'\u6982\u5ff5',
                            u'imageRef': u'10bab10c-1304-47d',
                            u'max_count': 1
                           }
                    }
            }
        deserializer = wsgi.JSONDeserializer()
        self.assertEqual(deserializer.deserialize(data), as_dict)
    def test_json_invalid_utf8(self):
        """Send invalid utf-8 to JSONDeserializer."""
        data = b"""{"server": {"min_count": 1, "flavorRef": "1",
                "name": "\xf0\x28\x8c\x28",
                "imageRef": "10bab10c-1304-47d",
                "max_count": 1}} """
        deserializer = wsgi.JSONDeserializer()
        self.assertRaises(exception.MalformedRequestBody,
                          deserializer.deserialize, data)
class ResourceTest(MicroversionedTest):
    def get_req_id_header_name(self, request):
        header_name = 'x-openstack-request-id'
        if utils.get_api_version(request) < 3:
            header_name = 'x-compute-request-id'
        return header_name
    def test_resource_receives_api_version_request_default(self):
        class Controller(object):
            def index(self, req):
                if req.api_version_request != \
                  api_version.APIVersionRequest(
                      api_version.DEFAULT_API_VERSION):
                    raise webob.exc.HTTPInternalServerError()
                return 'success'
        app = fakes.TestRouterV21(Controller())
        req = webob.Request.blank('/tests')
        response = req.get_response(app)
        self.assertEqual(b'success', response.body)
        self.assertEqual(response.status_int, 200)
    @mock.patch("nova.api.openstack.api_version_request.max_api_version")
    def test_resource_receives_api_version_request(self, mock_maxver):
        version = "2.5"
        mock_maxver.return_value = api_version.APIVersionRequest(version)
        class Controller(object):
            def index(self, req):
                if req.api_version_request != \
                  api_version.APIVersionRequest(version):
                    raise webob.exc.HTTPInternalServerError()
                return 'success'
        app = fakes.TestRouterV21(Controller())
        req = webob.Request.blank('/tests')
        req.headers = self._make_microversion_header(version)
        response = req.get_response(app)
        self.assertEqual(b'success', response.body)
        self.assertEqual(response.status_int, 200)
    def test_resource_receives_api_version_request_invalid(self):
        invalid_version = "2.5.3"
        class Controller(object):
            def index(self, req):
                return 'success'
        app = fakes.TestRouterV21(Controller())
        req = webob.Request.blank('/tests')
        req.headers = self._make_microversion_header(invalid_version)
        response = req.get_response(app)
        self.assertEqual(400, response.status_int)
    def test_resource_call_with_method_get(self):
        class Controller(object):
            def index(self, req):
                return 'success'
        app = fakes.TestRouter(Controller())
        # the default method is GET
        req = webob.Request.blank('/tests')
        response = req.get_response(app)
        self.assertEqual(b'success', response.body)
        self.assertEqual(response.status_int, 200)
        req.body = b'{"body": {"key": "value"}}'
        response = req.get_response(app)
        self.assertEqual(b'success', response.body)
        self.assertEqual(response.status_int, 200)
        req.content_type = 'application/json'
        response = req.get_response(app)
        self.assertEqual(b'success', response.body)
        self.assertEqual(response.status_int, 200)
    def test_resource_call_with_method_post(self):
        class Controller(object):
            @extensions.expected_errors(400)
            def create(self, req, body):
                if expected_body != body:
                    msg = "The request body invalid"
                    raise webob.exc.HTTPBadRequest(explanation=msg)
                return "success"
        # verify the method: POST
        app = fakes.TestRouter(Controller())
        req = webob.Request.blank('/tests', method="POST",
                                  content_type='application/json')
        req.body = b'{"body": {"key": "value"}}'
        expected_body = {'body': {
            "key": "value"
            }
        }
        response = req.get_response(app)
        self.assertEqual(response.status_int, 200)
        self.assertEqual(b'success', response.body)
        # verify without body
        expected_body = None
        req.body = None
        response = req.get_response(app)
        self.assertEqual(response.status_int, 200)
        self.assertEqual(b'success', response.body)
        # the body is validated in the controller
        expected_body = {'body': None}
        response = req.get_response(app)
        expected_unsupported_type_body = {'badRequest':
            {'message': 'The request body invalid', 'code': 400}}
        self.assertEqual(response.status_int, 400)
        self.assertEqual(expected_unsupported_type_body,
                         jsonutils.loads(response.body))
    def test_resource_call_with_method_put(self):
        class Controller(object):
            def update(self, req, id, body):
                if expected_body != body:
                    msg = "The request body invalid"
                    raise webob.exc.HTTPBadRequest(explanation=msg)
                return "success"
        # verify the method: PUT
        app = fakes.TestRouter(Controller())
        req = webob.Request.blank('/tests/test_id', method="PUT",
                                  content_type='application/json')
        req.body = b'{"body": {"key": "value"}}'
        expected_body = {'body': {
            "key": "value"
            }
        }
        response = req.get_response(app)
        self.assertEqual(b'success', response.body)
        self.assertEqual(response.status_int, 200)
        req.body = None
        expected_body = None
        response = req.get_response(app)
        self.assertEqual(response.status_int, 200)
        # verify no content_type is contained in the request
        req = webob.Request.blank('/tests/test_id', method="PUT",
                                  content_type='application/xml')
        req.content_type = 'application/xml'
        req.body = b'{"body": {"key": "value"}}'
        response = req.get_response(app)
        expected_unsupported_type_body = {'badMediaType':
            {'message': 'Unsupported Content-Type', 'code': 415}}
        self.assertEqual(response.status_int, 415)
        self.assertEqual(expected_unsupported_type_body,
                         jsonutils.loads(response.body))
    def test_resource_call_with_method_delete(self):
        class Controller(object):
            def delete(self, req, id):
                return "success"
        # verify the method: DELETE
        app = fakes.TestRouter(Controller())
        req = webob.Request.blank('/tests/test_id', method="DELETE")
        response = req.get_response(app)
        self.assertEqual(response.status_int, 200)
        self.assertEqual(b'success', response.body)
        # ignore the body
        req.body = b'{"body": {"key": "value"}}'
        response = req.get_response(app)
        self.assertEqual(response.status_int, 200)
        self.assertEqual(b'success', response.body)
    def test_resource_forbidden(self):
        class Controller(object):
            def index(self, req):
                raise exception.Forbidden()
        req = webob.Request.blank('/tests')
        app = fakes.TestRouter(Controller())
        response = req.get_response(app)
        self.assertEqual(response.status_int, 403)
    def test_resource_not_authorized(self):
        class Controller(object):
            def index(self, req):
                raise exception.Unauthorized()
        req = webob.Request.blank('/tests')
        app = fakes.TestRouter(Controller())
        self.assertRaises(
            exception.Unauthorized, req.get_response, app)
    def test_dispatch(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        method, extensions = resource.get_method(None, 'index', None, '')
        actual = resource.dispatch(method, None, {'pants': 'off'})
        expected = 'off'
        self.assertEqual(actual, expected)
    def test_get_method_unknown_controller_method(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertRaises(AttributeError, resource.get_method,
                          None, 'create', None, '')
    def test_get_method_action_json(self):
        class Controller(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body
        controller = Controller()
        resource = wsgi.Resource(controller)
        method, extensions = resource.get_method(None, 'action',
                                                 'application/json',
                                                 '{"fooAction": true}')
        self.assertEqual(controller._action_foo, method)
    def test_get_method_action_bad_body(self):
        class Controller(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body
        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertRaises(exception.MalformedRequestBody, resource.get_method,
                          None, 'action', 'application/json', '{}')
    def test_get_method_unknown_controller_action(self):
        class Controller(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body
        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertRaises(KeyError, resource.get_method,
                          None, 'action', 'application/json',
                          '{"barAction": true}')
    def test_get_method_action_method(self):
        class Controller(object):
            def action(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        method, extensions = resource.get_method(None, 'action',
                                                 'application/xml',
                                                 '<fooAction>true</fooAction')
        self.assertEqual(controller.action, method)
    def test_get_action_args(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        env = {
            'wsgiorg.routing_args': [None, {
                'controller': None,
                'format': None,
                'action': 'update',
                'id': 12,
            }],
        }
        expected = {'action': 'update', 'id': 12}
        self.assertEqual(resource.get_action_args(env), expected)
    def test_get_body_bad_content(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        request = wsgi.Request.blank('/', method='POST')
        request.headers['Content-Type'] = 'application/none'
        request.body = b'foo'
        self.assertRaises(exception.InvalidContentType,
                          resource.get_body, request)
    def test_get_body_no_content_type(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        request = wsgi.Request.blank('/', method='POST')
        request.body = b'foo'
        content_type, body = resource.get_body(request)
        self.assertIsNone(content_type)
        self.assertEqual(b'foo', body)
    def test_get_body_no_content_body(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        request = wsgi.Request.blank('/', method='POST')
        request.headers['Content-Type'] = 'application/json'
        request.body = b''
        content_type, body = resource.get_body(request)
        self.assertEqual('application/json', content_type)
        self.assertEqual(b'', body)
    def test_get_body_content_body_none(self):
        resource = wsgi.Resource(None)
        request = wsgi.Request.blank('/', method='PUT')
        body = None
        contents = resource._get_request_content(body, request)
        self.assertIn('body', contents)
        self.assertIsNone(contents['body'])
    def test_get_body(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        request = wsgi.Request.blank('/', method='POST')
        request.headers['Content-Type'] = 'application/json'
        request.body = b'foo'
        content_type, body = resource.get_body(request)
        self.assertEqual(content_type, 'application/json')
        self.assertEqual(b'foo', body)
    def test_get_request_id_with_dict_response_body(self):
        class Controller(wsgi.Controller):
            def index(self, req):
                return {'foo': 'bar'}
        req = fakes.HTTPRequest.blank('/tests')
        app = fakes.TestRouter(Controller())
        response = req.get_response(app)
        self.assertIn('nova.context', req.environ)
        self.assertEqual(b'{"foo": "bar"}', response.body)
        self.assertEqual(response.status_int, 200)
    def test_no_request_id_with_str_response_body(self):
        class Controller(wsgi.Controller):
            def index(self, req):
                return 'foo'
        req = fakes.HTTPRequest.blank('/tests')
        app = fakes.TestRouter(Controller())
        response = req.get_response(app)
        # NOTE(alaski): This test is really to ensure that a str response
        # doesn't error.  Not having a request_id header is a side effect of
        # our wsgi setup, ideally it would be there.
        expected_header = self.get_req_id_header_name(req)
        self.assertFalse(hasattr(response.headers, expected_header))
        self.assertEqual(b'foo', response.body)
        self.assertEqual(response.status_int, 200)
    def test_get_request_id_no_response_body(self):
        class Controller(object):
            def index(self, req):
                pass
        req = fakes.HTTPRequest.blank('/tests')
        app = fakes.TestRouter(Controller())
        response = req.get_response(app)
        self.assertIn('nova.context', req.environ)
        self.assertEqual(b'', response.body)
        self.assertEqual(response.status_int, 200)
    def test_deserialize_default(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        obj = resource.deserialize('["foo"]')
        self.assertEqual(obj, {'body': ['foo']})
    def test_register_actions(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        class ControllerExtended(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body
            @wsgi.action('barAction')
            def _action_bar(self, req, id, body):
                return body
        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertEqual({}, resource.wsgi_actions)
        extended = ControllerExtended()
        resource.register_actions(extended)
        self.assertEqual({
                'fooAction': extended._action_foo,
                'barAction': extended._action_bar,
                }, resource.wsgi_actions)
    def test_register_extensions(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        class ControllerExtended(wsgi.Controller):
            @wsgi.extends
            def index(self, req, resp_obj, pants=None):
                return None
            @wsgi.extends(action='fooAction')
            def _action_foo(self, req, resp, id, body):
                return None
        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertEqual({}, resource.wsgi_extensions)
        self.assertEqual({}, resource.wsgi_action_extensions)
        extended = ControllerExtended()
        resource.register_extensions(extended)
        self.assertEqual({'index': [extended.index]}, resource.wsgi_extensions)
        self.assertEqual({'fooAction': [extended._action_foo]},
                         resource.wsgi_action_extensions)
    def test_get_method_extensions(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        class ControllerExtended(wsgi.Controller):
            @wsgi.extends
            def index(self, req, resp_obj, pants=None):
                return None
        controller = Controller()
        extended = ControllerExtended()
        resource = wsgi.Resource(controller)
        resource.register_extensions(extended)
        method, extensions = resource.get_method(None, 'index', None, '')
        self.assertEqual(method, controller.index)
        self.assertEqual(extensions, [extended.index])
    def test_get_method_action_extensions(self):
        class Controller(wsgi.Controller):
            def index(self, req, pants=None):
                return pants
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body
        class ControllerExtended(wsgi.Controller):
            @wsgi.extends(action='fooAction')
            def _action_foo(self, req, resp_obj, id, body):
                return None
        controller = Controller()
        extended = ControllerExtended()
        resource = wsgi.Resource(controller)
        resource.register_extensions(extended)
        method, extensions = resource.get_method(None, 'action',
                                                 'application/json',
                                                 '{"fooAction": true}')
        self.assertEqual(method, controller._action_foo)
        self.assertEqual(extensions, [extended._action_foo])
    def test_get_method_action_whitelist_extensions(self):
        class Controller(wsgi.Controller):
            def index(self, req, pants=None):
                return pants
        class ControllerExtended(wsgi.Controller):
            @wsgi.action('create')
            def _create(self, req, body):
                pass
            @wsgi.action('delete')
            def _delete(self, req, id):
                pass
        controller = Controller()
        extended = ControllerExtended()
        resource = wsgi.Resource(controller)
        resource.register_actions(extended)
        method, extensions = resource.get_method(None, 'create',
                                                 'application/json',
                                                 '{"create": true}')
        self.assertEqual(method, extended._create)
        self.assertEqual(extensions, [])
        method, extensions = resource.get_method(None, 'delete', None, None)
        self.assertEqual(method, extended._delete)
        self.assertEqual(extensions, [])
    def test_process_extensions_regular(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        called = []
        def extension1(req, resp_obj):
            called.append(1)
            return None
        def extension2(req, resp_obj):
            called.append(2)
            return None
        response = resource.process_extensions([extension2, extension1],
                                                    None, None, {})
        self.assertEqual(called, [2, 1])
        self.assertIsNone(response)
    def test_process_extensions_regular_response(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        called = []
        def extension1(req, resp_obj):
            called.append(1)
            return None
        def extension2(req, resp_obj):
            called.append(2)
            return 'foo'
        response = resource.process_extensions([extension2, extension1],
                                                    None, None, {})
        self.assertEqual(called, [2])
        self.assertEqual(response, 'foo')
    def test_resource_exception_handler_type_error(self):
        # A TypeError should be translated to a Fault/HTTP 400.
        def foo(a,):
            return a
        try:
            with wsgi.ResourceExceptionHandler():
                foo()  # generate a TypeError
            self.fail("Should have raised a Fault (HTTP 400)")
        except wsgi.Fault as fault:
            self.assertEqual(400, fault.status_int)
    def test_resource_headers_are_utf8(self):
        resp = webob.Response(status_int=202)
        resp.headers['x-header1'] = 1
        resp.headers['x-header2'] = u'header2'
        resp.headers['x-header3'] = u'header3'
        class Controller(object):
            def index(self, req):
                return resp
        req = webob.Request.blank('/tests')
        app = fakes.TestRouter(Controller())
        response = req.get_response(app)
        for val in six.itervalues(response.headers):
            # All headers must be utf8
            self.assertThat(val, matchers.EncodedByUTF8())
        self.assertEqual('1', response.headers['x-header1'])
        self.assertEqual('header2', response.headers['x-header2'])
        self.assertEqual('header3', response.headers['x-header3'])
    def test_resource_valid_utf8_body(self):
        class Controller(object):
            def update(self, req, id, body):
                return body
        req = webob.Request.blank('/tests/test_id', method="PUT")
        body = b""" {"name": "\xe6\xa6\x82\xe5\xbf\xb5" } """
        expected_body = b'{"name": "\\u6982\\u5ff5"}'
        req.body = body
        req.headers['Content-Type'] = 'application/json'
        app = fakes.TestRouter(Controller())
        response = req.get_response(app)
        self.assertEqual(response.body, expected_body)
        self.assertEqual(response.status_int, 200)
    def test_resource_invalid_utf8(self):
        class Controller(object):
            def update(self, req, id, body):
                return body
        req = webob.Request.blank('/tests/test_id', method="PUT")
        body = b""" {"name": "\xf0\x28\x8c\x28" } """
        req.body = body
        req.headers['Content-Type'] = 'application/json'
        app = fakes.TestRouter(Controller())
        self.assertRaises(UnicodeDecodeError, req.get_response, app)
class ResponseObjectTest(test.NoDBTestCase):
    def test_default_code(self):
        robj = wsgi.ResponseObject({})
        self.assertEqual(robj.code, 200)
    def test_modified_code(self):
        robj = wsgi.ResponseObject({})
        robj._default_code = 202
        self.assertEqual(robj.code, 202)
    def test_override_default_code(self):
        robj = wsgi.ResponseObject({}, code=404)
        self.assertEqual(robj.code, 404)
    def test_override_modified_code(self):
        robj = wsgi.ResponseObject({}, code=404)
        robj._default_code = 202
        self.assertEqual(robj.code, 404)
    def test_set_header(self):
        robj = wsgi.ResponseObject({})
        robj['Header'] = 'foo'
        self.assertEqual(robj.headers, {'header': 'foo'})
    def test_get_header(self):
        robj = wsgi.ResponseObject({})
        robj['Header'] = 'foo'
        self.assertEqual(robj['hEADER'], 'foo')
    def test_del_header(self):
        robj = wsgi.ResponseObject({})
        robj['Header'] = 'foo'
        del robj['hEADER']
        self.assertNotIn('header', robj.headers)
    def test_header_isolation(self):
        robj = wsgi.ResponseObject({})
        robj['Header'] = 'foo'
        hdrs = robj.headers
        hdrs['hEADER'] = 'bar'
        self.assertEqual(robj['hEADER'], 'foo')
class ValidBodyTest(test.NoDBTestCase):
    def setUp(self):
        super(ValidBodyTest, self).setUp()
        self.controller = wsgi.Controller()
    def test_is_valid_body(self):
        body = {'foo': {}}
        self.assertTrue(self.controller.is_valid_body(body, 'foo'))
    def test_is_valid_body_none(self):
        wsgi.Resource(controller=None)
        self.assertFalse(self.controller.is_valid_body(None, 'foo'))
    def test_is_valid_body_empty(self):
        wsgi.Resource(controller=None)
        self.assertFalse(self.controller.is_valid_body({}, 'foo'))
    def test_is_valid_body_no_entity(self):
        wsgi.Resource(controller=None)
        body = {'bar': {}}
        self.assertFalse(self.controller.is_valid_body(body, 'foo'))
    def test_is_valid_body_malformed_entity(self):
        wsgi.Resource(controller=None)
        body = {'foo': 'bar'}
        self.assertFalse(self.controller.is_valid_body(body, 'foo'))
class TestController(test.NoDBTestCase):
    def test_check_for_versions_intersection_negative(self):
        func_list = \
            [versioned_method.VersionedMethod('foo',
                                              api_version.APIVersionRequest(
                                                  '2.1'),
                                              api_version.APIVersionRequest(
                                                  '2.4'),
                                              None),
             versioned_method.VersionedMethod('foo',
                                              api_version.APIVersionRequest(
                                                  '2.11'),
                                              api_version.APIVersionRequest(
                                                  '3.1'),
                                              None),
             versioned_method.VersionedMethod('foo',
                                              api_version.APIVersionRequest(
                                                  '2.8'),
                                              api_version.APIVersionRequest(
                                                  '2.9'),
                                              None),
             ]
        result = wsgi.Controller.check_for_versions_intersection(func_list=
                                                                 func_list)
        self.assertFalse(result)
        func_list = \
            [versioned_method.VersionedMethod('foo',
                                              api_version.APIVersionRequest(
                                                  '2.12'),
                                              api_version.APIVersionRequest(
                                                  '2.14'),
                                              None),
             versioned_method.VersionedMethod('foo',
                                              api_version.APIVersionRequest(
                                                  '3.0'),
                                              api_version.APIVersionRequest(
                                                  '3.4'),
                                              None)
             ]
        result = wsgi.Controller.check_for_versions_intersection(func_list=
                                                                 func_list)
        self.assertFalse(result)
    def test_check_for_versions_intersection_positive(self):
        func_list = \
            [versioned_method.VersionedMethod('foo',
                                              api_version.APIVersionRequest(
                                                  '2.1'),
                                              api_version.APIVersionRequest(
                                                  '2.4'),
                                              None),
             versioned_method.VersionedMethod('foo',
                                              api_version.APIVersionRequest(
                                                  '2.3'),
                                              api_version.APIVersionRequest(
                                                  '3.0'),
                                              None),
             versioned_method.VersionedMethod('foo',
                                              api_version.APIVersionRequest(
                                                  '2.8'),
                                              api_version.APIVersionRequest(
                                                  '2.9'),
                                              None),
             ]
        result = wsgi.Controller.check_for_versions_intersection(func_list=
                                                                 func_list)
        self.assertTrue(result)
##########################################################################
#  
#  Copyright (c) 2011-2012, John Haddon. All rights reserved.
#  Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#  
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are
#  met:
#  
#      * Redistributions of source code must retain the above
#        copyright notice, this list of conditions and the following
#        disclaimer.
#  
#      * Redistributions in binary form must reproduce the above
#        copyright notice, this list of conditions and the following
#        disclaimer in the documentation and/or other materials provided with
#        the distribution.
#  
#      * Neither the name of John Haddon nor the names of
#        any other contributors to this software may be used to endorse or
#        promote products derived from this software without specific prior
#        written permission.
#  
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
#  IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#  THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
#  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#  
##########################################################################
from __future__ import with_statement
import unittest
import weakref
import IECore
import Gaffer
import GafferUI
import GafferTest
import GafferUITest
class NestedPlugTestNode( Gaffer.Node ) :
		
	def __init__( self ) :
			
		Gaffer.Node.__init__( self )
	
IECore.registerRunTimeTyped( NestedPlugTestNode )
GafferUI.Nodule.registerNodule( NestedPlugTestNode.staticTypeId(), "c", GafferUI.CompoundNodule )
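# A CompoundNodule is registered for the "c" plug so that the nested-plug
# connection tests below get nodules for its child plugs.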
class NodeGraphTest( GafferUITest.TestCase ) :
	def testCreateWithExistingGraph( self ) :
	
		s = Gaffer.ScriptNode()
		
		s["add1"] = GafferTest.AddNode()
		s["add2"] = GafferTest.AddNode()
		
		s["add1"]["op1"].setInput( s["add2"]["sum"] )
		
		g = GafferUI.NodeGraph( s )
		
		self.failUnless( g.graphGadget().nodeGadget( s["add1"] ).node() is s["add1"] )
		self.failUnless( g.graphGadget().nodeGadget( s["add2"] ).node() is s["add2"] )
	
		self.failUnless( g.graphGadget().connectionGadget( s["add1"]["op1"] ).dstNodule().plug().isSame( s["add1"]["op1"] ) )
				
	def testGraphGadgetAccess( self ) :
	
		s = Gaffer.ScriptNode()
		ge = GafferUI.NodeGraph( s )
		
		g = ge.graphGadget()
		
		self.failUnless( isinstance( g, GafferUI.GraphGadget ) )
	
	def testRemovedNodesDontHaveGadgets( self ) :
	
		s = Gaffer.ScriptNode()
		g = GafferUI.GraphGadget( s )
		
		n = GafferTest.AddNode()
		s["add1"] = n
		
		self.failUnless( g.nodeGadget( n ) is not None )
		
		s.deleteNodes( filter = Gaffer.StandardSet( [ n ] ) )
		self.failUnless( g.nodeGadget( n ) is None )
	
	def testRemovedNodesDontHaveConnections( self ) :
	
		s = Gaffer.ScriptNode()
		
		n = GafferTest.AddNode()
		s["add1"] = n
		s["add2"] = GafferTest.AddNode()
		
		s["add1"]["op1"].setInput( s["add2"]["sum"] )
		
		g = GafferUI.NodeGraph( s )
		s.deleteNodes( filter = Gaffer.StandardSet( [ s["add1"] ] ) )
		
		self.failIf( g.graphGadget().connectionGadget( n["op1"] ) )
	def testCreateWithFilter( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		
		nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
		
		g = GafferUI.GraphGadget( script, nodeFilter )
		
		self.failIf( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
	
	def testEditFilter( self ) :
		
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		nodeFilter = Gaffer.StandardSet( script.children() )
		
		g = GafferUI.GraphGadget( script, nodeFilter )
		
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
			
		nodeFilter.remove( script["add1"] )
			
		self.failIf( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
		
		nodeFilter.remove( script["add2"] )
		
		self.failIf( g.nodeGadget( script["add1"] ) )
		self.failIf( g.nodeGadget( script["add2"] ) )
		
		nodeFilter.add( script["add1"] )
		
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failIf( g.nodeGadget( script["add2"] ) )
		nodeFilter.add( script["add2"] )
		
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
	
	def testUnhidingConnectedDstNodes( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		
		script["add2"]["op1"].setInput( script["add1"]["sum"] )
		
		nodeFilter = Gaffer.StandardSet( [ script["add1"] ] )
		g = GafferUI.GraphGadget( script, nodeFilter )
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failIf( g.nodeGadget( script["add2"] ) )
		self.failIf( g.connectionGadget( script["add2"]["op1"] ) )
		
		nodeFilter.add( script["add2"] )
		
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
		self.failUnless( g.connectionGadget( script["add2"]["op1"] ) )
	
	def testCreatingWithHiddenSrcNodes( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		
		script["add2"]["op1"].setInput( script["add1"]["sum"] )
		
		nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
		
		g = GafferUI.GraphGadget( script, nodeFilter )
		self.failIf( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
				
		c = g.connectionGadget( script["add2"]["op1"] )
		self.failUnless( c )
		
		self.failUnless( c.dstNodule().plug().isSame( script["add2"]["op1"] ) )
		self.assertEqual( c.srcNodule(), None )
		
	def testHidingConnectedDstNodes( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		
		script["add2"]["op1"].setInput( script["add1"]["sum"] )
		
		nodeFilter = Gaffer.StandardSet( script.children() )
		g = GafferUI.GraphGadget( script, nodeFilter )
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
		self.failUnless( g.connectionGadget( script["add2"]["op1"] ) )
		
		nodeFilter.remove( script["add2"] )
		
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failIf( g.nodeGadget( script["add2"] ) )
		self.failIf( g.connectionGadget( script["add2"]["op1"] ) )
		
	def testHidingConnectedSrcNodes( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		
		script["add2"]["op1"].setInput( script["add1"]["sum"] )
		
		nodeFilter = Gaffer.StandardSet( [ script["add1"], script["add2"] ] )
		
		g = GafferUI.GraphGadget( script, nodeFilter )
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
				
		c = g.connectionGadget( script["add2"]["op1"] )
		self.failUnless( c )
		
		self.failUnless( c.srcNodule().plug().isSame( script["add1"]["sum"] ) )
		self.failUnless( c.dstNodule().plug().isSame( script["add2"]["op1"] ) )
		
		nodeFilter.remove( script["add1"] )
		self.failIf( g.nodeGadget( script["add1"] ) )
	
		c = g.connectionGadget( script["add2"]["op1"] )
		self.failUnless( c )
		self.failUnless( c.srcNodule() is None )
		self.failUnless( c.dstNodule().plug().isSame( script["add2"]["op1"] ) )
	
	def testConnectingInvisibleDstNodes( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		
		nodeFilter = Gaffer.StandardSet( [ script["add1"] ] )
		g = GafferUI.GraphGadget( script, nodeFilter )
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failIf( g.nodeGadget( script["add2"] ) )		
				
		script["add2"]["op1"].setInput( script["add1"]["sum"] )
		
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failIf( g.nodeGadget( script["add2"] ) )
		self.failIf( g.connectionGadget( script["add2"]["op1"] ) )
		
	def testConnectingHiddenDstNodes( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		
		nodeFilter = Gaffer.StandardSet( script.children() )
		g = GafferUI.GraphGadget( script, nodeFilter )
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )		
		
		nodeFilter.remove( script["add2"] )
		
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failIf( g.nodeGadget( script["add2"] ) )		
				
		script["add2"]["op1"].setInput( script["add1"]["sum"] )
		
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failIf( g.nodeGadget( script["add2"] ) )
		self.failIf( g.connectionGadget( script["add2"]["op1"] ) )	
		
	def testConnectingHiddenSrcNodes( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		
		nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
		g = GafferUI.GraphGadget( script, nodeFilter )
	
		self.failIf( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
		
		script["add2"]["op1"].setInput( script["add1"]["sum"] )
		
		self.failIf( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
		
		c = g.connectionGadget( script["add2"]["op1"] )
		self.failUnless( c )
		self.failUnless( c.srcNodule() is None )
		
	def testConnectingHiddenSrcNodesAndReshowing( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		
		nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
		g = GafferUI.GraphGadget( script, nodeFilter )
	
		self.failIf( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
		
		script["add2"]["op1"].setInput( script["add1"]["sum"] )
		
		self.failIf( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
		
		c = g.connectionGadget( script["add2"]["op1"] )
		self.failUnless( c )
		self.failUnless( c.srcNodule() is None )
	
		nodeFilter.add( script["add1"] )
		
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
	
		c = g.connectionGadget( script["add2"]["op1"] )
		self.failUnless( c )
		self.failUnless( c.srcNodule().plug().isSame( script["add1"]["sum"] ) )
		
	def testChangingFilter( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		
		nodeFilter = Gaffer.StandardSet( [ script["add1"] ] )
		g = GafferUI.GraphGadget( script, nodeFilter )
		self.failUnless( g.nodeGadget( script["add1"] ) )
		self.failIf( g.nodeGadget( script["add2"] ) )
		
		nodeFilter2 = Gaffer.StandardSet( [ script["add2"] ] )
		g.setFilter( nodeFilter2 )
		
		self.failIf( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
		
	def testChangingFilterAndEditingOriginal( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		
		nodeFilter = Gaffer.StandardSet()
		g = GafferUI.GraphGadget( script, nodeFilter )
		self.failIf( g.nodeGadget( script["add1"] ) )
		self.failIf( g.nodeGadget( script["add2"] ) )
		
		nodeFilter2 = Gaffer.StandardSet( [ script["add2"] ] )
		g.setFilter( nodeFilter2 )
		
		self.failIf( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )	
		
		nodeFilter.add( script["add1"] )
		
		self.failIf( g.nodeGadget( script["add1"] ) )
		self.failUnless( g.nodeGadget( script["add2"] ) )
	
	def testConnectionsForNestedPlugs( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["n"] = NestedPlugTestNode()
		script["n"]["c"] = Gaffer.CompoundPlug()
		script["n"]["c"]["i"] = Gaffer.IntPlug()
		
		script["n2"] = NestedPlugTestNode()
		script["n2"]["c"] = Gaffer.CompoundPlug(  direction = Gaffer.Plug.Direction.Out )
		script["n2"]["c"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
		
		script["n"]["c"]["i"].setInput( script["n2"]["c"]["o"] )
		
		s = Gaffer.StandardSet( script.children() )
		g = GafferUI.GraphGadget( script, s )
				
		c = g.connectionGadget( script["n"]["c"]["i"] )
		self.failUnless( c )
		self.failUnless( c.srcNodule().plug().isSame( script["n2"]["c"]["o"] ) )
		self.failUnless( c.dstNodule().plug().isSame( script["n"]["c"]["i"] ) )
		
		s.remove( script["n2"] )
		
		self.failUnless( g.nodeGadget( script["n2"] ) is None )
		
		c = g.connectionGadget( script["n"]["c"]["i"] )
		self.failUnless( c )
		self.failUnless( c.srcNodule() is None )
		self.failUnless( c.dstNodule().plug().isSame( script["n"]["c"]["i"] ) )
		
		s.add( script["n2"] )
		
		self.failUnless( g.nodeGadget( script["n2"] ) )
		
		c = g.connectionGadget( script["n"]["c"]["i"] )
		self.failUnless( c )
		self.failUnless( c.srcNodule().plug().isSame( script["n2"]["c"]["o"] ) )
		self.failUnless( c.dstNodule().plug().isSame( script["n"]["c"]["i"] ) )
		
		s.remove( script["n"] )
		
		self.failUnless( g.nodeGadget( script["n"] ) is None )
		
		self.failUnless( g.connectionGadget( script["n"]["c"]["i"] ) is None )
		s.add( script["n"] )
		self.failUnless( g.nodeGadget( script["n"] ) )
		
		c = g.connectionGadget( script["n"]["c"]["i"] )
		self.failUnless( c )
		self.failUnless( c.srcNodule().plug().isSame( script["n2"]["c"]["o"] ) )
		self.failUnless( c.dstNodule().plug().isSame( script["n"]["c"]["i"] ) )
	def testRemovePlugWithInputConnection( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["n1"] = Gaffer.Node()
		script["n2"] = Gaffer.Node()
		
		script["n1"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
		script["n2"]["i"] = Gaffer.IntPlug()
		
		script["n2"]["i"].setInput( script["n1"]["o"] )
		
		g = GafferUI.GraphGadget( script )
		
		self.failUnless( g.connectionGadget( script["n2"]["i"] ) is not None )
		
		with Gaffer.UndoContext( script ) :
			
			removedPlug = script["n2"]["i"]
			del script["n2"]["i"]
			
		self.failUnless( g.connectionGadget( removedPlug ) is None )
		script.undo()
		
		self.failUnless( g.connectionGadget( script["n2"]["i"] ) is not None )		
	
	def testRemovePlugWithOutputConnection( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["n1"] = Gaffer.Node()
		script["n2"] = Gaffer.Node()
		
		script["n1"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
		script["n2"]["i"] = Gaffer.IntPlug()
		
		script["n2"]["i"].setInput( script["n1"]["o"] )
		
		g = GafferUI.GraphGadget( script )
		
		self.failUnless( g.connectionGadget( script["n2"]["i"] ) is not None )
		
		with Gaffer.UndoContext( script ) :
		
			del script["n1"]["o"]
			
		self.failUnless( g.connectionGadget( script["n2"]["i"] ) is None )
		script.undo()
		
		self.failUnless( g.connectionGadget( script["n2"]["i"] ) is not None )		
	
	def testConnectionBound( self ) :
	
		for i in range( 0, 100 ) :
		
			script = Gaffer.ScriptNode()
	
			script["n1"] = Gaffer.Node()
			script["n2"] = Gaffer.Node()
			
			script["n1"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
			script["n2"]["i"] = Gaffer.IntPlug()
			
			script["n2"]["i"].setInput( script["n1"]["o"] )
			
			g = GafferUI.GraphGadget( script )
			c = g.connectionGadget( script["n2"]["i"] )
	
			gb = IECore.Box3f()
			gb.extendBy( g.nodeGadget( script["n1"] ).bound() )
			gb.extendBy( g.nodeGadget( script["n2"] ).bound() )
			gb.min -= IECore.V3f( 10 )
			gb.max += IECore.V3f( 10 )
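			# The connection gadget's bound should fit inside the padded bound
			# of the two node gadgets it joins.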
	
			b = c.bound()
			self.failIf( b.isEmpty() )
			
			self.failUnless( gb.contains( b ) )
	
	def testNoFilter( self ) :
	
		s = Gaffer.ScriptNode()
		s["n1"] = Gaffer.Node()
		
		g = GafferUI.GraphGadget( s )
		
		self.assertTrue( g.getRoot().isSame( s ) )
		self.assertTrue( g.getFilter() is None )
		self.assertTrue( g.nodeGadget( s["n1"] ) )
		
		s["n2"] = Gaffer.Node()
		self.assertTrue( g.nodeGadget( s["n1"] ) )		
	
	def testFilterIsChildSet( self ) :
	
		s = Gaffer.ScriptNode()
		s["n1"] = Gaffer.Node()
		
		g = GafferUI.GraphGadget( s, Gaffer.ChildSet( s ) )
		self.assertTrue( g.nodeGadget( s["n1"] ) )
		
		l = len( g )
		
		s["n2"] = Gaffer.Node()
		self.assertTrue( g.nodeGadget( s["n2"] ) )
		
		self.assertEqual( len( g ), l + 1 )
	
	def testSetRoot( self ) :
	
		s = Gaffer.ScriptNode()
		s["b"] = Gaffer.Box()
		s["b"]["n"] = Gaffer.Node()
		
		f = Gaffer.StandardSet( [ s["b"] ] )
		g = GafferUI.GraphGadget( s, f )
		
		self.assertTrue( g.nodeGadget( s["b"] ) )
		self.assertFalse( g.nodeGadget( s["b"]["n"] ) )
		
		g.setRoot( s["b"] )
		self.assertTrue( g.getRoot().isSame( s["b"] ) )
		self.assertEqual( g.getFilter(), None )
		
		self.assertTrue( g.nodeGadget( s["b"]["n"] ) )
		self.assertFalse( g.nodeGadget( s["b"] ) )
	
	def testRootChangedSignal( self ) :
	
		s = Gaffer.ScriptNode()
		s["b"] = Gaffer.Box()
		
		roots = []
		previousRoots = []
		def f( gg, previousRoot ) :
		
			self.failUnless( gg.isSame( g ) )
			roots.append( gg.getRoot() )
			previousRoots.append( previousRoot )
		
		g = GafferUI.GraphGadget( s )
		c = g.rootChangedSignal().connect( f )
		
		self.assertEqual( len( roots ), 0 )
		self.assertEqual( len( previousRoots ), 0 )
		
		g.setRoot( s["b"] )
		self.assertEqual( len( roots ), 1 )
		self.assertTrue( roots[0].isSame( s["b"] ) )
		self.assertEqual( len( previousRoots ), 1 )
		self.assertTrue( previousRoots[0].isSame( s ) )
		
		g.setRoot( s["b"] )
		self.assertEqual( len( roots ), 1 )
		self.assertTrue( roots[0].isSame( s["b"] ) )
		self.assertEqual( len( previousRoots ), 1 )
		self.assertTrue( previousRoots[0].isSame( s ) )
		
		g.setRoot( s )
		self.assertEqual( len( roots ), 2 )
		self.assertTrue( roots[1].isSame( s ) )
		self.assertEqual( len( previousRoots ), 2 )
		self.assertTrue( previousRoots[1].isSame( s["b"] ) )
		
	def testLifetime( self ) :
	
		s = Gaffer.ScriptNode()
		s["n"] = GafferTest.AddNode()
		
		e = GafferUI.NodeGraph( s )
		
		we = weakref.ref( e )
		del e
		
		self.assertEqual( we(), None )
			
	def testSetNodePosition( self ) :
	
		s = Gaffer.ScriptNode()
		s["n"] = Gaffer.Node()
		
		g = GafferUI.GraphGadget( s )
		
		g.setNodePosition( s["n"], IECore.V2f( -100, 2000 ) )
		self.assertEqual( g.getNodePosition( s["n"] ), IECore.V2f( -100, 2000 ) )
	def testTitle( self ) :
	
		s = Gaffer.ScriptNode()
		
		g = GafferUI.NodeGraph( s )
		
		self.assertEqual( g.getTitle(), "Node Graph" )
		
		g.setTitle( "This is a test!" )
		
		self.assertEqual( g.getTitle(), "This is a test!" )
		
	def testPlugConnectionGadgets( self ) :

		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		script["add3"] = GafferTest.AddNode()
		script["add4"] = GafferTest.AddNode()
		
		script["add2"]["op1"].setInput( script["add1"]["sum"] )
		script["add3"]["op1"].setInput( script["add2"]["sum"] )
		script["add4"]["op2"].setInput( script["add2"]["sum"] )
				
		g = GafferUI.GraphGadget( script )
		
		c = g.connectionGadgets( script["add1"]["sum"] )
		self.assertEqual( len( c ), 1 )
		self.assertTrue( c[0].srcNodule().plug().isSame( script["add1"]["sum"] ) )
		self.assertTrue( c[0].dstNodule().plug().isSame( script["add2"]["op1"] ) )
		
		c = g.connectionGadgets( script["add1"]["sum"], excludedNodes = Gaffer.StandardSet( [ script["add2"] ] ) )
		self.assertEqual( len( c ), 0 )
		
		c = g.connectionGadgets( script["add2"]["sum"] )
		self.assertEqual( len( c ), 2 )
		self.assertTrue( c[0].srcNodule().plug().isSame( script["add2"]["sum"] ) )
		self.assertTrue( c[0].dstNodule().plug().isSame( script["add3"]["op1"] ) )
		self.assertTrue( c[1].srcNodule().plug().isSame( script["add2"]["sum"] ) )
		self.assertTrue( c[1].dstNodule().plug().isSame( script["add4"]["op2"] ) )
		
		c = g.connectionGadgets( script["add2"]["sum"], excludedNodes = Gaffer.StandardSet( [ script["add3"] ] ) )
		self.assertEqual( len( c ), 1 )
		self.assertTrue( c[0].srcNodule().plug().isSame( script["add2"]["sum"] ) )
		self.assertTrue( c[0].dstNodule().plug().isSame( script["add4"]["op2"] ) )

	def testNodeConnectionGadgets( self ) :

		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		script["add3"] = GafferTest.AddNode()
		script["add4"] = GafferTest.AddNode()
		
		script["add2"]["op1"].setInput( script["add1"]["sum"] )
		script["add3"]["op1"].setInput( script["add2"]["sum"] )
		script["add4"]["op2"].setInput( script["add2"]["sum"] )
				
		g = GafferUI.GraphGadget( script )
		
		c = g.connectionGadgets( script["add1"] )
		self.assertEqual( len( c ), 1 )
		self.assertTrue( c[0].srcNodule().plug().isSame( script["add1"]["sum"] ) )
		self.assertTrue( c[0].dstNodule().plug().isSame( script["add2"]["op1"] ) )
		
		c = g.connectionGadgets( script["add1"], excludedNodes = Gaffer.StandardSet( [ script["add2"] ] ) )
		self.assertEqual( len( c ), 0 )
		
		c = g.connectionGadgets( script["add2"] )
		self.assertEqual( len( c ), 3 )
		self.assertTrue( c[0].srcNodule().plug().isSame( script["add1"]["sum"] ) )
		self.assertTrue( c[0].dstNodule().plug().isSame( script["add2"]["op1"] ) )
		self.assertTrue( c[1].srcNodule().plug().isSame( script["add2"]["sum"] ) )
		self.assertTrue( c[1].dstNodule().plug().isSame( script["add3"]["op1"] ) )
		self.assertTrue( c[2].srcNodule().plug().isSame( script["add2"]["sum"] ) )
		self.assertTrue( c[2].dstNodule().plug().isSame( script["add4"]["op2"] ) )
		
		c = g.connectionGadgets( script["add2"], excludedNodes = Gaffer.StandardSet( [ script["add3"] ] ) )
		self.assertEqual( len( c ), 2 )
		self.assertTrue( c[0].srcNodule().plug().isSame( script["add1"]["sum"] ) )
		self.assertTrue( c[0].dstNodule().plug().isSame( script["add2"]["op1"] ) )
		self.assertTrue( c[1].srcNodule().plug().isSame( script["add2"]["sum"] ) )
		self.assertTrue( c[1].dstNodule().plug().isSame( script["add4"]["op2"] ) )
	
	def testInternalConnectionsNotShown( self ) :
	
		# make sure they're not shown when they exist before graph visualisation
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add1"]["sum"].setInput( script["add1"]["op1"] )
		script["add1"]["op1"].setInput( script["add1"]["op2"] )
		
		g = GafferUI.GraphGadget( script )
		self.assertEqual( len( g.connectionGadgets( script["add1"] ) ), 0 )
		self.assertEqual( g.connectionGadget( script["add1"]["sum"] ), None )
		self.assertEqual( g.connectionGadget( script["add1"]["op1"] ), None )
		self.assertEqual( g.connectionGadget( script["add1"]["op2"] ), None )
		
		# make sure they're not shown when they're made after graph visualisation
	
		script = Gaffer.ScriptNode()
		g = GafferUI.GraphGadget( script )
		
		script["add1"] = GafferTest.AddNode()
		script["add1"]["sum"].setInput( script["add1"]["op1"] )
		script["add1"]["op1"].setInput( script["add1"]["op2"] )
		
		self.assertEqual( len( g.connectionGadgets( script["add1"] ) ), 0 )
		self.assertEqual( g.connectionGadget( script["add1"]["sum"] ), None )
		self.assertEqual( g.connectionGadget( script["add1"]["op1"] ), None )
		self.assertEqual( g.connectionGadget( script["add1"]["op2"] ), None )
	
	def testConnectionMinimisedAccessors( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		script["add3"] = GafferTest.AddNode()
		
		script["add2"]["op1"].setInput( script["add1"]["sum"] )
		script["add3"]["op1"].setInput( script["add2"]["sum"] )
				
		g = GafferUI.GraphGadget( script )
		
		self.assertFalse( g.getNodeInputConnectionsMinimised( script["add1"] ) )
		self.assertFalse( g.getNodeInputConnectionsMinimised( script["add2"] ) )
		self.assertFalse( g.getNodeInputConnectionsMinimised( script["add3"] ) )
		
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add1"] ) )
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add2"] ) )
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add3"] ) )
		
		g.setNodeInputConnectionsMinimised( script["add3"], True )
		
		self.assertFalse( g.getNodeInputConnectionsMinimised( script["add1"] ) )
		self.assertFalse( g.getNodeInputConnectionsMinimised( script["add2"] ) )
		self.assertTrue( g.getNodeInputConnectionsMinimised( script["add3"] ) )
		
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add1"] ) )
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add2"] ) )
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add3"] ) )
		
		g.setNodeOutputConnectionsMinimised( script["add2"], True )
		
		self.assertFalse( g.getNodeInputConnectionsMinimised( script["add1"] ) )
		self.assertFalse( g.getNodeInputConnectionsMinimised( script["add2"] ) )
		self.assertTrue( g.getNodeInputConnectionsMinimised( script["add3"] ) )
		
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add1"] ) )
		self.assertTrue( g.getNodeOutputConnectionsMinimised( script["add2"] ) )
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add3"] ) )
		
		g.setNodeOutputConnectionsMinimised( script["add2"], False )
		
		self.assertFalse( g.getNodeInputConnectionsMinimised( script["add1"] ) )
		self.assertFalse( g.getNodeInputConnectionsMinimised( script["add2"] ) )
		self.assertTrue( g.getNodeInputConnectionsMinimised( script["add3"] ) )
		
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add1"] ) )
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add2"] ) )
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add3"] ) )
		g.setNodeInputConnectionsMinimised( script["add3"], False )
		
		self.assertFalse( g.getNodeInputConnectionsMinimised( script["add1"] ) )
		self.assertFalse( g.getNodeInputConnectionsMinimised( script["add2"] ) )
		self.assertFalse( g.getNodeInputConnectionsMinimised( script["add3"] ) )
		
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add1"] ) )
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add2"] ) )
		self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add3"] ) )
	
	def testConnectionMinimisation( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["add1"] = GafferTest.AddNode()
		script["add2"] = GafferTest.AddNode()
		script["add3"] = GafferTest.AddNode()
		
		g = GafferUI.GraphGadget( script )
		
		g.setNodeOutputConnectionsMinimised( script["add1"], True )
		
		script["add2"]["op1"].setInput( script["add1"]["sum"] )
		
		c1 = g.connectionGadget( script["add2"]["op1"] )
		self.assertTrue( c1.getMinimised() )
		
		script["add3"]["op1"].setInput( script["add2"]["sum"] )
		
		c2 = g.connectionGadget( script["add3"]["op1"] )
		self.assertFalse( c2.getMinimised() )
		
		g.setNodeInputConnectionsMinimised( script["add2"], True )
		
		self.assertTrue( c1.getMinimised() )
		self.assertFalse( c2.getMinimised() )
		
		g.setNodeOutputConnectionsMinimised( script["add1"], False )
		
		self.assertTrue( c1.getMinimised() )
		self.assertFalse( c2.getMinimised() )
		
		g.setNodeInputConnectionsMinimised( script["add2"], False )
		
		self.assertFalse( c1.getMinimised() )
		self.assertFalse( c2.getMinimised() )
	
	def testNodeGadgetCreatorReturningNull( self ) :
	
		class InvisibleNode( GafferTest.AddNode ) :
		
			def __init__( self, name = "InvisibleNode" ) :
			
				GafferTest.AddNode.__init__( self, name )
				
		IECore.registerRunTimeTyped( InvisibleNode )
		
		GafferUI.NodeGadget.registerNodeGadget( InvisibleNode.staticTypeId(), lambda node : None )
		
		script = Gaffer.ScriptNode()
		g = GafferUI.GraphGadget( script )
		
		script["n1"] = InvisibleNode()
		script["n2"] = InvisibleNode()
		
		self.assertEqual( g.nodeGadget( script["n1"] ), None )
		self.assertEqual( g.nodeGadget( script["n2"] ), None )
		
		script["n2"]["op1"].setInput( script["n1"]["sum"] )
		
		self.assertEqual( g.connectionGadget( script["n2"]["op1"] ), None )
		
		# in case it wasn't clear, hiding the nodes has zero
		# effect on their computations.
		
		script["n1"]["op1"].setValue( 12 )
		script["n1"]["op2"].setValue( 13 )
		script["n2"]["op2"].setValue( 100 )
		
		self.assertEqual( script["n2"]["sum"].getValue(), 125 )
	
	def testUpstreamNodeGadgets( self ) :
	
		script = Gaffer.ScriptNode()
		
		# a -> b -> c -> e -> f
		#           ^
		#           |
		#           d
		
		script["a"] = GafferTest.AddNode()
		script["b"] = GafferTest.AddNode()
		script["c"] = GafferTest.AddNode()
		script["d"] = GafferTest.AddNode()
		script["e"] = GafferTest.AddNode()
		script["f"] = GafferTest.AddNode()
		
		script["b"]["op1"].setInput( script["a"]["sum"] )
		script["c"]["op1"].setInput( script["b"]["sum"] )
		script["c"]["op2"].setInput( script["d"]["sum"] )
		script["e"]["op1"].setInput( script["c"]["sum"] )
		script["f"]["op1"].setInput( script["e"]["sum"] )
		
		g = GafferUI.GraphGadget( script )
		
		u = [ x.node().relativeName( script ) for x in g.upstreamNodeGadgets( script["c"] ) ]
		
		self.assertEqual( len( u ), 3 )
		self.assertEqual( set( u ), set( [ "a", "b", "d" ] ) )
		u = [ x.node().relativeName( script ) for x in g.upstreamNodeGadgets( script["f"] ) ]
		self.assertEqual( len( u ), 5 )
		self.assertEqual( set( u ), set( [ "a", "b", "d", "c", "e" ] ) )
		
		# nodes hidden by the filter should be ignored when walking upstream
		
		g.setFilter( Gaffer.StandardSet( [ script["f"], script["e"], script["a"] ] ) )
		
		u = [ x.node().relativeName( script ) for x in g.upstreamNodeGadgets( script["f"] ) ]
		self.assertEqual( u, [ "e" ] )
	
	def testSelectionHighlighting( self ) :
	
		script = Gaffer.ScriptNode()
		
		script["a"] = GafferTest.AddNode()
		script["b"] = GafferTest.AddNode()
		
		script.selection().add( script["a"] )
		
		g = GafferUI.GraphGadget( script )
		
		self.assertTrue( g.nodeGadget( script["a"] ).getHighlighted() )
		self.assertFalse( g.nodeGadget( script["b"] ).getHighlighted() )
		
		script.selection().add( script["b"] )
		
		self.assertTrue( g.nodeGadget( script["a"] ).getHighlighted() )
		self.assertTrue( g.nodeGadget( script["b"] ).getHighlighted() )
		script.selection().remove( script["a"] )
		self.assertFalse( g.nodeGadget( script["a"] ).getHighlighted() )
		self.assertTrue( g.nodeGadget( script["b"] ).getHighlighted() )
		
		script.selection().clear()
		
		self.assertFalse( g.nodeGadget( script["a"] ).getHighlighted() )
		self.assertFalse( g.nodeGadget( script["b"] ).getHighlighted() )
	
	def testNoDuplicatePositionPlugsAfterPasting( self ) :
	
		script = Gaffer.ScriptNode()
		script["n"] = Gaffer.Node()
		
		g = GafferUI.GraphGadget( script )
		
		script.execute( script.serialise( script, Gaffer.StandardSet( [ script["n"] ] ) ) )
		
		self.assertTrue( "__uiPosition" in script["n1"] )
		self.assertFalse( "__uiPosition1" in script["n1"] )
		
if __name__ == "__main__":
	unittest.main()
 | |
| 
	import torch
import torch.nn as nn
import torch.utils.data as data_utils
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F  # used by F.dropout in _DenseLayer.forward when drop_rate > 0
from collections import OrderedDict
class PreTrainedModel(nn.Module):
    def __init__(self, pretrained):
        super(PreTrainedModel, self).__init__()
        common_features_net = nn.Sequential(*list(pretrained.children())[0:1])
        self.net_16M = nn.Sequential(OrderedDict([
            ('conv0', common_features_net[0].conv0),
            ('norm0', common_features_net[0].norm0),
            ('relu0', common_features_net[0].relu0)
        ]))
        self.net_8M = nn.Sequential(OrderedDict([
            ('pool0', common_features_net[0].pool0)
        ]))
        self.net_4M = nn.Sequential(OrderedDict([
            ('denseblock1', common_features_net[0].denseblock1),
            ('transition1', common_features_net[0].transition1)
        ]))
        self.net_2M = nn.Sequential(OrderedDict([
            ('denseblock2', common_features_net[0].denseblock2),
            ('transition2', common_features_net[0].transition2)
        ]))
        self.net_1M = nn.Sequential(OrderedDict([
            ('denseblock3', common_features_net[0].denseblock3),
            ('transition3', common_features_net[0].transition3),
            ('denseblock4', common_features_net[0].denseblock4)
        ]))
    def forward(self, ft_32M):
        
        pretrained_features = [0]*5
        pretrained_features[0] = self.net_16M(ft_32M)
        pretrained_features[1]  = self.net_8M(pretrained_features[0])
        pretrained_features[2]  = self.net_4M(pretrained_features[1])
        pretrained_features[3]  = self.net_2M(pretrained_features[2])
        pretrained_features[4]  = self.net_1M(pretrained_features[3])
        return pretrained_features
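# forward() returns a high-to-low resolution pyramid of five feature maps: the
# conv stem output, then the outputs after pool0, transition1, transition2 and
# the final dense block; for a DenseNet-121 backbone these carry 64, 64, 128,
# 256 and 1024 channels respectively (the counts GradientNet assumes below).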
class _DenseLayer(nn.Sequential):
    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, ks=3):
        super(_DenseLayer, self).__init__()
        self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
        self.add_module('relu1', nn.ReLU(inplace=True)),
        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *
                        growth_rate, kernel_size=1, stride=1, bias=False)),
        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
        self.add_module('relu2', nn.ReLU(inplace=True)),
        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,
                        kernel_size=ks, stride=1, padding=(ks-1)//2, bias=False)),
        self.drop_rate = drop_rate
    def forward(self, x):
        new_features = super(_DenseLayer, self).forward(x)
        if self.drop_rate > 0:
            new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
        return torch.cat([x, new_features], 1)
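# Dense connectivity: each _DenseLayer returns its input concatenated with the
# newly computed features, so the channel count grows by `growth_rate` with
# every layer inside a block.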
class _DenseBlock(nn.Sequential):
    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, ks=3):
        super(_DenseBlock, self).__init__()
        for i in range(num_layers):
            layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate, ks=ks)
            self.add_module('denselayer%d' % (i + 1), layer)
class _MyTransition(nn.Sequential):
    def __init__(self, num_input_features, num_output_features, pool_ks=3):
        super(_MyTransition, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
                                          kernel_size=1, stride=1, padding=0, bias=False))
        self.add_module('pool', nn.AvgPool2d(kernel_size=pool_ks, stride=1, padding=(pool_ks-1)//2))
    
class GradientNet(nn.Module):
    def build_blocks(self, num_block, num_init_features, pool_ks=3, ks=3, bn_size=4, growth_rate=32):
        drop_rate = 0
        num_features = num_init_features
        features = nn.Sequential()
        for i, num_layers in enumerate(num_block):
            block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,
                                bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, ks=ks)
            features.add_module('mydenseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            
            trans = _MyTransition(num_input_features=num_features, num_output_features=num_features // 2, pool_ks=pool_ks)
            features.add_module('mytransition%d' % (i + 1), trans)
            num_features = num_features // 2
#         return features.cuda()
        return features
    
    def __init__(self, use_gpu=True, bn_size=4, growth_rate=32):
        super(GradientNet, self).__init__()
        self.block_config = [(6,6,6),(6,6,6),(12,12,12),(16,16,16),(24,24,24)]
        self.num_input_features = [64,64,128,256,1024]
        self.upsample_config = [2*2,4*2,8*2,16*2,32*2]
        
        # upsample pretrained features
        self.upsample_8M_for_16M = nn.Sequential(OrderedDict([
            ('compress', nn.Conv2d(64,16,1)),
            ('upsample', nn.Upsample(scale_factor=2, mode='bilinear'))
        ]))
        self.upsample_4M_for_16M = nn.Sequential(OrderedDict([
            ('compress', nn.Conv2d(128,16,1)),
            ('upsample', nn.Upsample(scale_factor=4, mode='bilinear'))
        ]))
        self.upsample_2M_for_16M = nn.Sequential(OrderedDict([
            ('compress', nn.Conv2d(256,16,1)),
            ('upsample', nn.Upsample(scale_factor=8, mode='bilinear'))
        ]))
        self.upsample_1M_for_16M = nn.Sequential(OrderedDict([
            ('compress', nn.Conv2d(1024,16,1)),
            ('upsample', nn.Upsample(scale_factor=16, mode='bilinear'))
        ]))
        self.compress16M = nn.Conv2d(64+4*16, 64, 1)
        # upsample pretrained features
        self.upsample_4M_for_8M = nn.Sequential(OrderedDict([
            ('compress', nn.Conv2d(128,16,1)),
            ('upsample', nn.Upsample(scale_factor=2, mode='bilinear'))
        ]))
        self.upsample_2M_for_8M = nn.Sequential(OrderedDict([
            ('compress', nn.Conv2d(256,16,1)),
            ('upsample', nn.Upsample(scale_factor=4, mode='bilinear'))
        ]))
        self.upsample_1M_for_8M = nn.Sequential(OrderedDict([
            ('compress', nn.Conv2d(1024,16,1)),
            ('upsample', nn.Upsample(scale_factor=8, mode='bilinear'))
        ]))
        self.compress8M = nn.Conv2d(64+3*16, 64, 1)
        # upsample pretrained features
        self.upsample_2M_for_4M = nn.Sequential(OrderedDict([
            ('compress', nn.Conv2d(256,64,1)),
            ('upsample', nn.Upsample(scale_factor=2, mode='bilinear'))
        ]))
        self.upsample_1M_for_4M = nn.Sequential(OrderedDict([
            ('compress', nn.Conv2d(1024,64,1)),
            ('upsample', nn.Upsample(scale_factor=4, mode='bilinear'))
        ]))
        self.compress4M = nn.Conv2d(128+2*64, 128, 1)
        # upsample pretrained features
        self.upsample_1M_for_2M = nn.Sequential(OrderedDict([
            ('compress', nn.Conv2d(1024,256,1)),
            ('upsample', nn.Upsample(scale_factor=2, mode='bilinear'))
        ]))
        self.compress2M = nn.Conv2d(256+256, 256, 1)
        i=0; self.denseblock16 = self.build_blocks(self.block_config[i], self.num_input_features[i], ks=7, bn_size=bn_size, growth_rate=growth_rate)
        i=1; self.denseblock08 = self.build_blocks(self.block_config[i], self.num_input_features[i], ks=5, bn_size=bn_size, growth_rate=growth_rate)
        i=2; self.denseblock04 = self.build_blocks(self.block_config[i], self.num_input_features[i], ks=3, bn_size=bn_size, growth_rate=growth_rate)
        i=3; self.denseblock02 = self.build_blocks(self.block_config[i], self.num_input_features[i], ks=3, bn_size=bn_size, growth_rate=growth_rate)
        i=4; self.denseblock01 = self.build_blocks(self.block_config[i], self.num_input_features[i], ks=3, bn_size=bn_size, growth_rate=growth_rate)
        
        
        # upsample final
        self.num_upsample_input_features = [176,176,352,480,800]
        # i=0; self.upsample16 = nn.ConvTranspose2d(in_channels=self.num_upsample_input_features[i], out_channels=3, kernel_size=self.upsample_config[i], stride=2, padding=1, output_padding=0, groups=1, bias=True, dilation=1)
        # i=1; self.upsample08 = nn.ConvTranspose2d(in_channels=self.num_upsample_input_features[i], out_channels=3, kernel_size=self.upsample_config[i], stride=4, padding=2, output_padding=0, groups=1, bias=True, dilation=1)
        # i=2; self.upsample04 = nn.ConvTranspose2d(in_channels=self.num_upsample_input_features[i], out_channels=3, kernel_size=self.upsample_config[i], stride=8, padding=4, output_padding=0, groups=1, bias=True, dilation=1)
        # i=3; self.upsample02 = nn.ConvTranspose2d(in_channels=self.num_upsample_input_features[i], out_channels=3, kernel_size=self.upsample_config[i], stride=16, padding=8, output_padding=0, groups=1, bias=True, dilation=1)
        # i=4; self.upsample01 = nn.ConvTranspose2d(in_channels=self.num_upsample_input_features[i], out_channels=3, kernel_size=self.upsample_config[i], stride=32, padding=16, output_padding=0, groups=1, bias=True, dilation=1)
        
        i=0; self.compress16 = nn.Conv2d(self.num_upsample_input_features[i], 3, 1)
        i=1; self.compress08 = nn.Conv2d(self.num_upsample_input_features[i], 3, 1)
        i=2; self.compress04 = nn.Conv2d(self.num_upsample_input_features[i], 3, 1)
        i=3; self.compress02 = nn.Conv2d(self.num_upsample_input_features[i], 3, 1)
        i=4; self.compress01 = nn.Conv2d(self.num_upsample_input_features[i], 3, 1)
        
        i=0; self.upsample16 = nn.Sequential(OrderedDict([
                ('bilinear', nn.Upsample(scale_factor=2, mode='bilinear'))
            ]))
        i=1; self.upsample08 = nn.Sequential(OrderedDict([
                ('bilinear', nn.Upsample(scale_factor=4, mode='bilinear'))
            ]))
        i=2; self.upsample04 = nn.Sequential(OrderedDict([
                ('bilinear', nn.Upsample(scale_factor=8, mode='bilinear'))
            ]))
        i=3; self.upsample02 = nn.Sequential(OrderedDict([
                ('bilinear', nn.Upsample(scale_factor=16, mode='bilinear'))
            ]))
        i=4; self.upsample01 = nn.Sequential(OrderedDict([
                ('bilinear', nn.Upsample(scale_factor=32, mode='bilinear'))
            ]))
        """merge v1"""
        # self.merge = nn.Sequential()
        # self.merge_in_channels =  (3*len(self.block_config), 64, 32, 16)
        # self.merge_out_channels = (                      64, 32, 16,  3)
        # for i in range(0, len(self.merge_out_channels)): 
        #     self.merge.add_module('merge.norm.%d'%i, nn.BatchNorm2d(self.merge_in_channels[i])),
        #     self.merge.add_module('merge.relu.%d'%i, nn.ReLU(inplace=True)),
        #     self.merge.add_module('merge.conv.%d'%i, nn.Conv2d(in_channels=self.merge_in_channels[i], 
        #                         out_channels=self.merge_out_channels[i], kernel_size=1))
        # self.merge.add_module('merge.final', nn.Sigmoid())
        
        """merge v2"""
        self.merge = nn.Sequential()
        self.merge.add_module('merge_denseblock', self.build_blocks((3,3,3), 3*len(self.block_config), pool_ks=1, bn_size=bn_size, growth_rate=growth_rate))
        self.merge.add_module('merge_final_conv', nn.Conv2d(in_channels=85, out_channels=3, kernel_size=1))
        self.merge.add_module('merge_final_sigmoid', nn.Sigmoid())
        
    def forward(self, ft_pretrained):
        # ft_pretrained = self.pretrained_model(ft_input)
        ft_predict   = [0]*len(ft_pretrained)
        ft_upsampled = [0]*len(ft_pretrained)
        
        upsampled_8M_for_16M = self.upsample_8M_for_16M(ft_pretrained[1])
        upsampled_4M_for_16M = self.upsample_4M_for_16M(ft_pretrained[2])
        upsampled_2M_for_16M = self.upsample_2M_for_16M(ft_pretrained[3])
        upsampled_1M_for_16M = self.upsample_1M_for_16M(ft_pretrained[4])
        
        _16M = torch.cat([
            ft_pretrained[0],
            upsampled_8M_for_16M,
            upsampled_4M_for_16M,
            upsampled_2M_for_16M,
            upsampled_1M_for_16M
        ], 1)
        _16M = self.compress16M(_16M)
        upsampled_4M_for_8M = self.upsample_4M_for_8M(ft_pretrained[2])
        upsampled_2M_for_8M = self.upsample_2M_for_8M(ft_pretrained[3])
        upsampled_1M_for_8M = self.upsample_1M_for_8M(ft_pretrained[4])
        
        _8M = torch.cat([
            ft_pretrained[1],
            upsampled_4M_for_8M,
            upsampled_2M_for_8M,
            upsampled_1M_for_8M
        ], 1)
        
        _8M = self.compress8M(_8M)
        
        upsampled_2M_for_4M = self.upsample_2M_for_4M(ft_pretrained[3])
        upsampled_1M_for_4M = self.upsample_1M_for_4M(ft_pretrained[4])
        
        _4M = torch.cat([
            ft_pretrained[2],
            upsampled_2M_for_4M,
            upsampled_1M_for_4M
        ], 1)
        
        _4M = self.compress4M(_4M)
        upsampled_1M_for_2M = self.upsample_1M_for_2M(ft_pretrained[4])
        
        _2M = torch.cat([
            ft_pretrained[3],
            upsampled_1M_for_2M
        ], 1)
        
        _2M = self.compress2M(_2M)
        i = 0; ft_predict[i] = self.denseblock16(_16M)
        i = 1; ft_predict[i] = self.denseblock08(_8M)
        i = 2; ft_predict[i] = self.denseblock04(_4M)
        i = 3; ft_predict[i] = self.denseblock02(_2M)
        i = 4; ft_predict[i] = self.denseblock01(ft_pretrained[i])
        
        i = 0; ft_predict[i] = self.compress16(ft_predict[i])
        i = 1; ft_predict[i] = self.compress08(ft_predict[i])
        i = 2; ft_predict[i] = self.compress04(ft_predict[i])
        i = 3; ft_predict[i] = self.compress02(ft_predict[i])
        i = 4; ft_predict[i] = self.compress01(ft_predict[i])
        
        i = 0; ft_upsampled[i] = self.upsample16(ft_predict[i])
        i = 1; ft_upsampled[i] = self.upsample08(ft_predict[i])
        i = 2; ft_upsampled[i] = self.upsample04(ft_predict[i])
        i = 3; ft_upsampled[i] = self.upsample02(ft_predict[i])
        i = 4; ft_upsampled[i] = self.upsample01(ft_predict[i])
        
        ft_concated = torch.cat(ft_upsampled, 1)
        ft_merged = self.merge(ft_concated)
        ft_output = ft_predict[0:5] + [ft_merged]
        return ft_output
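# A minimal usage sketch, not part of the original module: it assumes a
# torchvision DenseNet-121 backbone, whose per-scale channel counts
# (64, 64, 128, 256, 1024) match the values hard-coded in GradientNet above.
if __name__ == '__main__':
    from torchvision import models

    backbone = models.densenet121()           # untrained weights suffice for a shape check
    encoder = PreTrainedModel(backbone)
    net = GradientNet()

    x = torch.randn(1, 3, 64, 64)             # spatial size must be a multiple of 32
    feats = encoder(x)                         # five feature maps from 1/2 down to 1/32 resolution
    outputs = net(feats)                       # five per-scale predictions plus the merged map
    print([tuple(o.shape) for o in outputs])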
 | |
| 
	"""The tests for the Geofency device tracker platform."""
# pylint: disable=redefined-outer-name
from unittest.mock import patch
import pytest
from homeassistant.components import zone
from homeassistant.components.geofency import (
    CONF_MOBILE_BEACONS, URL, DOMAIN)
from homeassistant.const import (
    HTTP_OK, HTTP_UNPROCESSABLE_ENTITY, STATE_HOME,
    STATE_NOT_HOME)
from homeassistant.setup import async_setup_component
from homeassistant.util import slugify
HOME_LATITUDE = 37.239622
HOME_LONGITUDE = -115.815811
NOT_HOME_LATITUDE = 37.239394
NOT_HOME_LONGITUDE = -115.763283
GPS_ENTER_HOME = {
    'latitude': HOME_LATITUDE,
    'longitude': HOME_LONGITUDE,
    'device': '4A7FE356-2E9D-4264-A43F-BF80ECAEE416',
    'name': 'Home',
    'radius': 100,
    'id': 'BAAD384B-A4AE-4983-F5F5-4C2F28E68205',
    'date': '2017-08-19T10:53:53Z',
    'address': 'Testing Trail 1',
    'entry': '1'
}
GPS_EXIT_HOME = {
    'latitude': HOME_LATITUDE,
    'longitude': HOME_LONGITUDE,
    'device': '4A7FE356-2E9D-4264-A43F-BF80ECAEE416',
    'name': 'Home',
    'radius': 100,
    'id': 'BAAD384B-A4AE-4983-F5F5-4C2F28E68205',
    'date': '2017-08-19T10:53:53Z',
    'address': 'Testing Trail 1',
    'entry': '0'
}
BEACON_ENTER_HOME = {
    'latitude': HOME_LATITUDE,
    'longitude': HOME_LONGITUDE,
    'beaconUUID': 'FFEF0E83-09B2-47C8-9837-E7B563F5F556',
    'minor': '36138',
    'major': '8629',
    'device': '4A7FE356-2E9D-4264-A43F-BF80ECAEE416',
    'name': 'Home',
    'radius': 100,
    'id': 'BAAD384B-A4AE-4983-F5F5-4C2F28E68205',
    'date': '2017-08-19T10:53:53Z',
    'address': 'Testing Trail 1',
    'entry': '1'
}
BEACON_EXIT_HOME = {
    'latitude': HOME_LATITUDE,
    'longitude': HOME_LONGITUDE,
    'beaconUUID': 'FFEF0E83-09B2-47C8-9837-E7B563F5F556',
    'minor': '36138',
    'major': '8629',
    'device': '4A7FE356-2E9D-4264-A43F-BF80ECAEE416',
    'name': 'Home',
    'radius': 100,
    'id': 'BAAD384B-A4AE-4983-F5F5-4C2F28E68205',
    'date': '2017-08-19T10:53:53Z',
    'address': 'Testing Trail 1',
    'entry': '0'
}
BEACON_ENTER_CAR = {
    'latitude': NOT_HOME_LATITUDE,
    'longitude': NOT_HOME_LONGITUDE,
    'beaconUUID': 'FFEF0E83-09B2-47C8-9837-E7B563F5F556',
    'minor': '36138',
    'major': '8629',
    'device': '4A7FE356-2E9D-4264-A43F-BF80ECAEE416',
    'name': 'Car 1',
    'radius': 100,
    'id': 'BAAD384B-A4AE-4983-F5F5-4C2F28E68205',
    'date': '2017-08-19T10:53:53Z',
    'address': 'Testing Trail 1',
    'entry': '1'
}
BEACON_EXIT_CAR = {
    'latitude': NOT_HOME_LATITUDE,
    'longitude': NOT_HOME_LONGITUDE,
    'beaconUUID': 'FFEF0E83-09B2-47C8-9837-E7B563F5F556',
    'minor': '36138',
    'major': '8629',
    'device': '4A7FE356-2E9D-4264-A43F-BF80ECAEE416',
    'name': 'Car 1',
    'radius': 100,
    'id': 'BAAD384B-A4AE-4983-F5F5-4C2F28E68205',
    'date': '2017-08-19T10:53:53Z',
    'address': 'Testing Trail 1',
    'entry': '0'
}
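# In the Geofency webhook payload, 'entry' is '1' for an entry event and '0'
# for an exit; the 'beaconUUID', 'major' and 'minor' keys, as in the beacon
# fixtures above, mark iBeacon-based events.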
@pytest.fixture(autouse=True)
def mock_dev_track(mock_device_tracker_conf):
    """Mock device tracker config loading."""
    pass
@pytest.fixture
def geofency_client(loop, hass, hass_client):
    """Geofency mock client."""
    assert loop.run_until_complete(async_setup_component(
        hass, DOMAIN, {
            DOMAIN: {
                CONF_MOBILE_BEACONS: ['Car 1']
            }}))
    loop.run_until_complete(hass.async_block_till_done())
    with patch('homeassistant.components.device_tracker.update_config'):
        yield loop.run_until_complete(hass_client())
@pytest.fixture(autouse=True)
def setup_zones(loop, hass):
    """Set up Zone config in HA."""
    assert loop.run_until_complete(async_setup_component(
        hass, zone.DOMAIN, {
            'zone': {
                'name': 'Home',
                'latitude': HOME_LATITUDE,
                'longitude': HOME_LONGITUDE,
                'radius': 100,
            }}))
async def test_data_validation(geofency_client):
    """Test data validation."""
    # No data
    req = await geofency_client.post(URL)
    assert req.status == HTTP_UNPROCESSABLE_ENTITY
    missing_attributes = ['address', 'device',
                          'entry', 'latitude', 'longitude', 'name']
    # missing attributes
    for attribute in missing_attributes:
        copy = GPS_ENTER_HOME.copy()
        del copy[attribute]
        req = await geofency_client.post(URL, data=copy)
        assert req.status == HTTP_UNPROCESSABLE_ENTITY
async def test_gps_enter_and_exit_home(hass, geofency_client):
    """Test GPS based zone enter and exit."""
    # Enter the Home zone
    req = await geofency_client.post(URL, data=GPS_ENTER_HOME)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    device_name = slugify(GPS_ENTER_HOME['device'])
    state_name = hass.states.get('{}.{}'.format(
        'device_tracker', device_name)).state
    assert STATE_HOME == state_name
    # Exit the Home zone
    req = await geofency_client.post(URL, data=GPS_EXIT_HOME)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    device_name = slugify(GPS_EXIT_HOME['device'])
    state_name = hass.states.get('{}.{}'.format(
        'device_tracker', device_name)).state
    assert STATE_NOT_HOME == state_name
    # Exit the Home zone with "Send Current Position" enabled
    data = GPS_EXIT_HOME.copy()
    data['currentLatitude'] = NOT_HOME_LATITUDE
    data['currentLongitude'] = NOT_HOME_LONGITUDE
    req = await geofency_client.post(URL, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    device_name = slugify(GPS_EXIT_HOME['device'])
    current_latitude = hass.states.get('{}.{}'.format(
        'device_tracker', device_name)).attributes['latitude']
    assert NOT_HOME_LATITUDE == current_latitude
    current_longitude = hass.states.get('{}.{}'.format(
        'device_tracker', device_name)).attributes['longitude']
    assert NOT_HOME_LONGITUDE == current_longitude
async def test_beacon_enter_and_exit_home(hass, geofency_client):
    """Test iBeacon based zone enter and exit - a.k.a stationary iBeacon."""
    # Enter the Home zone
    req = await geofency_client.post(URL, data=BEACON_ENTER_HOME)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    device_name = slugify("beacon_{}".format(BEACON_ENTER_HOME['name']))
    state_name = hass.states.get('{}.{}'.format(
        'device_tracker', device_name)).state
    assert STATE_HOME == state_name
    # Exit the Home zone
    req = await geofency_client.post(URL, data=BEACON_EXIT_HOME)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    device_name = slugify("beacon_{}".format(BEACON_ENTER_HOME['name']))
    state_name = hass.states.get('{}.{}'.format(
        'device_tracker', device_name)).state
    assert STATE_NOT_HOME == state_name
async def test_beacon_enter_and_exit_car(hass, geofency_client):
    """Test use of mobile iBeacon."""
    # Enter the Car away from Home zone
    req = await geofency_client.post(URL, data=BEACON_ENTER_CAR)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    device_name = slugify("beacon_{}".format(BEACON_ENTER_CAR['name']))
    state_name = hass.states.get('{}.{}'.format(
        'device_tracker', device_name)).state
    assert STATE_NOT_HOME == state_name
    # Exit the Car away from Home zone
    req = await geofency_client.post(URL, data=BEACON_EXIT_CAR)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    device_name = slugify("beacon_{}".format(BEACON_ENTER_CAR['name']))
    state_name = hass.states.get('{}.{}'.format(
        'device_tracker', device_name)).state
    assert STATE_NOT_HOME == state_name
    # Enter the Car in the Home zone
    data = BEACON_ENTER_CAR.copy()
    data['latitude'] = HOME_LATITUDE
    data['longitude'] = HOME_LONGITUDE
    req = await geofency_client.post(URL, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    device_name = slugify("beacon_{}".format(data['name']))
    state_name = hass.states.get('{}.{}'.format(
        'device_tracker', device_name)).state
    assert STATE_HOME == state_name
    # Exit the Car in the Home zone
    req = await geofency_client.post(URL, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    device_name = slugify("beacon_{}".format(data['name']))
    state_name = hass.states.get('{}.{}'.format(
        'device_tracker', device_name)).state
    assert STATE_HOME == state_name
 | |
| 
	import os
from os import path as op
import numpy as np
from numpy.polynomial import legendre
from ..parallel import parallel_func
from ..utils import logger, _get_extra_data_path
##############################################################################
# FAST LEGENDRE (DERIVATIVE) POLYNOMIALS USING LOOKUP TABLE
def _next_legen_der(n, x, p0, p01, p0d, p0dd):
    """Compute the next Legendre polynomial and its derivatives"""
    # only good for n > 1 !
    help_ = p0
    helpd = p0d
    p0 = ((2 * n - 1) * x * help_ - (n - 1) * p01) / n
    p0d = n * help_ + x * helpd
    p0dd = (n + 1) * helpd + x * p0dd
    p01 = help_
    return p0, p0d, p0dd
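# The recursion above is Bonnet's formula for Legendre polynomials,
#     n * P_n(x) = (2n - 1) * x * P_{n-1}(x) - (n - 1) * P_{n-2}(x),
# with the first and second derivatives propagated alongside via
#     P_n'(x)  = n * P_{n-1}(x) + x * P_{n-1}'(x)
#     P_n''(x) = (n + 1) * P_{n-1}'(x) + x * P_{n-1}''(x)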
def _get_legen(x, n_coeff=100):
    """Get Legendre polynomials expanded about x"""
    return legendre.legvander(x, n_coeff - 1)
def _get_legen_der(xx, n_coeff=100):
    """Get Legendre polynomial derivatives expanded about x"""
    coeffs = np.empty((len(xx), n_coeff, 3))
    for c, x in zip(coeffs, xx):
        p0s, p0ds, p0dds = c[:, 0], c[:, 1], c[:, 2]
        p0s[:2] = [1.0, x]
        p0ds[:2] = [0.0, 1.0]
        p0dds[:2] = [0.0, 0.0]
        for n in range(2, n_coeff):
            p0s[n], p0ds[n], p0dds[n] = _next_legen_der(
                n, x, p0s[n - 1], p0s[n - 2], p0ds[n - 1], p0dds[n - 1])
    return coeffs
def _get_legen_table(ch_type, volume_integral=False, n_coeff=100,
                     n_interp=20000, force_calc=False):
    """Return a (generated) LUT of Legendre (derivative) polynomial coeffs"""
    if n_interp % 2 != 0:
        raise RuntimeError('n_interp must be even')
    fname = op.join(_get_extra_data_path(), 'tables')
    if not op.isdir(fname):
        # Updated due to API change (GH 1167)
        os.makedirs(fname)
    if ch_type == 'meg':
        fname = op.join(fname, 'legder_%s_%s.bin' % (n_coeff, n_interp))
        leg_fun = _get_legen_der
        extra_str = ' derivative'
        lut_shape = (n_interp + 1, n_coeff, 3)
    else:  # 'eeg'
        fname = op.join(fname, 'legval_%s_%s.bin' % (n_coeff, n_interp))
        leg_fun = _get_legen
        extra_str = ''
        lut_shape = (n_interp + 1, n_coeff)
    if not op.isfile(fname) or force_calc:
        n_out = (n_interp // 2)
        logger.info('Generating Legendre%s table...' % extra_str)
        x_interp = np.arange(-n_out, n_out + 1, dtype=np.float64) / n_out
        lut = leg_fun(x_interp, n_coeff).astype(np.float32)
        if not force_calc:
            with open(fname, 'wb') as fid:
                fid.write(lut.tostring())
    else:
        logger.info('Reading Legendre%s table...' % extra_str)
        with open(fname, 'rb', buffering=0) as fid:
            lut = np.fromfile(fid, np.float32)
    lut.shape = lut_shape
    # we need this for the integration step
    n_fact = np.arange(1, n_coeff, dtype=float)
    if ch_type == 'meg':
        n_facts = list()  # multn, then mult, then multn * (n + 1)
        if volume_integral:
            n_facts.append(n_fact / ((2.0 * n_fact + 1.0)
                                     * (2.0 * n_fact + 3.0)))
        else:
            n_facts.append(n_fact / (2.0 * n_fact + 1.0))
        n_facts.append(n_facts[0] / (n_fact + 1.0))
        n_facts.append(n_facts[0] * (n_fact + 1.0))
        # skip the first set of coefficients because they are not used
        lut = lut[:, 1:, [0, 1, 1, 2]]  # for multiplicative convenience later
        # reshape this for convenience, too
        n_facts = np.array(n_facts)[[2, 0, 1, 1], :].T
        n_facts = np.ascontiguousarray(n_facts)
        n_fact = n_facts
    else:  # 'eeg'
        n_fact = (2.0 * n_fact + 1.0) * (2.0 * n_fact + 1.0) / n_fact
        # skip the first set of coefficients because they are not used
        lut = lut[:, 1:].copy()
    return lut, n_fact
def _get_legen_lut_fast(x, lut):
    """Return Legendre coefficients for given x values in -1<=x<=1"""
    # map into table vals (works for both vals and deriv tables)
    n_interp = (lut.shape[0] - 1.0)
    # equiv to "(x + 1.0) / 2.0) * n_interp" but faster
    mm = x * (n_interp / 2.0) + 0.5 * n_interp
    # nearest-neighbor version (could be decent enough...)
    idx = np.round(mm).astype(int)
    vals = lut[idx]
    return vals
def _get_legen_lut_accurate(x, lut):
    """Return Legendre coefficients for given x values in -1<=x<=1"""
    # map into table vals (works for both vals and deriv tables)
    n_interp = (lut.shape[0] - 1.0)
    # equiv to "(x + 1.0) / 2.0) * n_interp" but faster
    mm = x * (n_interp / 2.0) + 0.5 * n_interp
    # slower, more accurate interpolation version
    mm = np.minimum(mm, n_interp - 0.0000000001)
    idx = np.floor(mm).astype(int)
    w2 = mm - idx
    w2.shape += tuple([1] * (lut.ndim - w2.ndim))  # expand to correct size
    vals = (1 - w2) * lut[idx] + w2 * lut[idx + 1]
    return vals
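# Both lookup helpers map x in [-1, 1] onto the row index of the precomputed
# table; _get_legen_lut_fast rounds to the nearest row, while
# _get_legen_lut_accurate linearly interpolates between the two neighbouring
# rows for a smoother (but slower) result.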
def _comp_sum_eeg(beta, ctheta, lut_fun, n_fact):
    """Lead field dot products using Legendre polynomial (P_n) series"""
    # Compute the sum occurring in the evaluation.
    # The result is
    #   sums[:]    (2n+1)^2/n beta^n P_n
    coeffs = lut_fun(ctheta)
    betans = np.cumprod(np.tile(beta[:, np.newaxis], (1, n_fact.shape[0])),
                        axis=1)
    s0 = np.dot(coeffs * betans, n_fact)  # == weighted sum across cols
    return s0
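# Here coeffs holds P_n(cos(theta)) values from the lookup table, betans holds
# the powers beta^1 .. beta^N via the cumulative product, and n_fact carries
# the (2n+1)^2/n weights, so the dot product above evaluates the whole series
# in one vectorised step.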
def _comp_sums_meg(beta, ctheta, lut_fun, n_fact, volume_integral):
    """Lead field dot products using Legendre polynomial (P_n) series"""
    # Compute the sums occurring in the evaluation.
    # Two point magnetometers on the xz plane are assumed.
    # The four sums are:
    #  * sums[:, 0]    n(n+1)/(2n+1) beta^(n+1) P_n
    #  * sums[:, 1]    n/(2n+1) beta^(n+1) P_n'
    #  * sums[:, 2]    n/((2n+1)(n+1)) beta^(n+1) P_n'
    #  * sums[:, 3]    n/((2n+1)(n+1)) beta^(n+1) P_n''
    coeffs = lut_fun(ctheta)
    beta = (np.cumprod(np.tile(beta[:, np.newaxis], (1, n_fact.shape[0])),
                       axis=1) * beta[:, np.newaxis])
    # This is equivalent, but slower:
    # sums = np.sum(beta[:, :, np.newaxis] * n_fact * coeffs, axis=1)
    # sums = np.rollaxis(sums, 2)
    sums = np.einsum('ij,jk,ijk->ki', beta, n_fact, coeffs)
    return sums
###############################################################################
# SPHERE DOTS
def _fast_sphere_dot_r0(r, rr1, rr2, lr1, lr2, cosmags1, cosmags2,
                        w1, w2, volume_integral, lut, n_fact, ch_type):
    """Lead field dot product computation for M/EEG in the sphere model"""
    ct = np.einsum('ik,jk->ij', rr1, rr2)  # outer product, sum over coords
    # expand axes
    rr1 = rr1[:, np.newaxis, :]  # (n_rr1, n_rr2, n_coord) e.g. 4x4x3
    rr2 = rr2[np.newaxis, :, :]
    lr1lr2 = lr1[:, np.newaxis] * lr2[np.newaxis, :]
    beta = (r * r) / lr1lr2
    if ch_type == 'meg':
        sums = _comp_sums_meg(beta.flatten(), ct.flatten(), lut, n_fact,
                              volume_integral)
        sums.shape = (4,) + beta.shape
        # Accumulate the result, a little bit streamlined version
        #cosmags1 = cosmags1[:, np.newaxis, :]
        #cosmags2 = cosmags2[np.newaxis, :, :]
        #n1c1 = np.sum(cosmags1 * rr1, axis=2)
        #n1c2 = np.sum(cosmags1 * rr2, axis=2)
        #n2c1 = np.sum(cosmags2 * rr1, axis=2)
        #n2c2 = np.sum(cosmags2 * rr2, axis=2)
        #n1n2 = np.sum(cosmags1 * cosmags2, axis=2)
        n1c1 = np.einsum('ik,ijk->ij', cosmags1, rr1)
        n1c2 = np.einsum('ik,ijk->ij', cosmags1, rr2)
        n2c1 = np.einsum('jk,ijk->ij', cosmags2, rr1)
        n2c2 = np.einsum('jk,ijk->ij', cosmags2, rr2)
        n1n2 = np.einsum('ik,jk->ij', cosmags1, cosmags2)
        part1 = ct * n1c1 * n2c2
        part2 = n1c1 * n2c1 + n1c2 * n2c2
        result = (n1c1 * n2c2 * sums[0] +
                  (2.0 * part1 - part2) * sums[1] +
                  (n1n2 + part1 - part2) * sums[2] +
                  (n1c2 - ct * n1c1) * (n2c1 - ct * n2c2) * sums[3])
        # Give it a finishing touch!
        const = 4e-14 * np.pi  # This is \mu_0^2/4\pi
        result *= (const / lr1lr2)
        if volume_integral:
            result *= r
    else:  # 'eeg'
        sums = _comp_sum_eeg(beta.flatten(), ct.flatten(), lut, n_fact)
        sums.shape = beta.shape
        # Give it a finishing touch!
        eeg_const = 1.0 / (4.0 * np.pi)
        result = eeg_const * sums / lr1lr2
    # now we add them all up with weights
    if w1 is None:  # operating on surface, treat independently
        #result = np.sum(w2[np.newaxis, :] * result, axis=1)
        result = np.dot(result, w2)
    else:
        #result = np.sum((w1[:, np.newaxis] * w2[np.newaxis, :]) * result)
        result = np.einsum('i,j,ij', w1, w2, result)
    return result
def _do_self_dots(intrad, volume, coils, r0, ch_type, lut, n_fact, n_jobs):
    """Perform the lead field dot product integrations"""
    if ch_type == 'eeg':
        intrad *= 0.7
    # convert to normalized distances from expansion center
    rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils]
    rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags]
    rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)]
    cosmags = [coil['cosmag'] for coil in coils]
    ws = [coil['w'] for coil in coils]
    parallel, p_fun, _ = parallel_func(_do_self_dots_subset, n_jobs)
    prods = parallel(p_fun(intrad, rmags, rlens, cosmags,
                           ws, volume, lut, n_fact, ch_type, idx)
                     for idx in np.array_split(np.arange(len(rmags)), n_jobs))
    products = np.sum(prods, axis=0)
    return products
def _do_self_dots_subset(intrad, rmags, rlens, cosmags, ws, volume, lut,
                         n_fact, ch_type, idx):
    """Helper for parallelization"""
    products = np.zeros((len(rmags), len(rmags)))
    for ci1 in idx:
        for ci2 in range(0, ci1 + 1):
            res = _fast_sphere_dot_r0(intrad, rmags[ci1], rmags[ci2],
                                      rlens[ci1], rlens[ci2],
                                      cosmags[ci1], cosmags[ci2],
                                      ws[ci1], ws[ci2], volume, lut,
                                      n_fact, ch_type)
            products[ci1, ci2] = res
            products[ci2, ci1] = res
    return products
def _do_surface_dots(intrad, volume, coils, surf, sel, r0, ch_type,
                     lut, n_fact, n_jobs):
    """Compute the map construction products"""
    virt_ref = False
    # convert to normalized distances from expansion center
    rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils]
    rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags]
    rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)]
    cosmags = [coil['cosmag'] for coil in coils]
    ws = [coil['w'] for coil in coils]
    rref = None
    refl = None
    if ch_type == 'eeg':
        intrad *= 0.7
        if virt_ref:
            rref = virt_ref[np.newaxis, :] - r0[np.newaxis, :]
            refl = np.sqrt(np.sum(rref * rref, axis=1))
            rref /= refl[:, np.newaxis]
    rsurf = surf['rr'][sel] - r0[np.newaxis, :]
    lsurf = np.sqrt(np.sum(rsurf * rsurf, axis=1))
    rsurf /= lsurf[:, np.newaxis]
    this_nn = surf['nn'][sel]
    parallel, p_fun, _ = parallel_func(_do_surface_dots_subset, n_jobs)
    prods = parallel(p_fun(intrad, rsurf, rmags, rref, refl, lsurf, rlens,
                           this_nn, cosmags, ws, volume, lut, n_fact, ch_type,
                           idx)
                     for idx in np.array_split(np.arange(len(rmags)), n_jobs))
    products = np.sum(prods, axis=0)
    return products
def _do_surface_dots_subset(intrad, rsurf, rmags, rref, refl, lsurf, rlens,
                            this_nn, cosmags, ws, volume, lut, n_fact, ch_type,
                            idx):
    """Helper for parallelization"""
    products = np.zeros((len(rsurf), len(rmags)))
    for ci in idx:
        res = _fast_sphere_dot_r0(intrad, rsurf, rmags[ci],
                                  lsurf, rlens[ci],
                                  this_nn, cosmags[ci],
                                  None, ws[ci], volume, lut,
                                  n_fact, ch_type)
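        # Note: the virtual-reference branch below is effectively dead code
        # here (virt_ref is hard-coded to False in _do_surface_dots, so rref
        # is always None), and its _fast_sphere_dot_r0 call omits the two
        # orientation arguments that the function signature expects.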
        if rref is not None:
            vres = _fast_sphere_dot_r0(intrad, rref, rmags[ci],
                                       refl, rlens[ci],
                                       None, ws[ci], volume,
                                       lut, n_fact, ch_type)
            products[:, ci] = res - vres
        else:
            products[:, ci] = res
    return products
 | |
| 
	# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import tempfile
from test import safe_repr
from test.unit import MockTrue
from swift.common.swob import HTTPBadRequest, Request, HTTPException
from swift.common.http import HTTP_REQUEST_ENTITY_TOO_LARGE, \
    HTTP_BAD_REQUEST, HTTP_LENGTH_REQUIRED
from swift.common import constraints
class TestConstraints(unittest.TestCase):
    def assertIn(self, member, container, msg=None):
        """Copied from 2.7"""
        if member not in container:
            standardMsg = '%s not found in %s' % (safe_repr(member),
                                                  safe_repr(container))
            self.fail(self._formatMessage(msg, standardMsg))
    def test_check_metadata_empty(self):
        headers = {}
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), None)
    def test_check_metadata_good(self):
        headers = {'X-Object-Meta-Name': 'Value'}
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), None)
    def test_check_metadata_empty_name(self):
        headers = {'X-Object-Meta-': 'Value'}
        self.assert_(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), HTTPBadRequest)
    def test_check_metadata_name_length(self):
        name = 'a' * constraints.MAX_META_NAME_LENGTH
        headers = {'X-Object-Meta-%s' % name: 'v'}
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), None)
        name = 'a' * (constraints.MAX_META_NAME_LENGTH + 1)
        headers = {'X-Object-Meta-%s' % name: 'v'}
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
        self.assertIn(
            ('X-Object-Meta-%s' % name).lower(),
            constraints.check_metadata(Request.blank(
                '/', headers=headers), 'object').body.lower())
    def test_check_metadata_value_length(self):
        value = 'a' * constraints.MAX_META_VALUE_LENGTH
        headers = {'X-Object-Meta-Name': value}
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), None)
        value = 'a' * (constraints.MAX_META_VALUE_LENGTH + 1)
        headers = {'X-Object-Meta-Name': value}
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
        self.assertIn(
            'x-object-meta-name',
            constraints.check_metadata(Request.blank(
                '/', headers=headers),
                'object').body.lower())
        self.assertIn(
            str(constraints.MAX_META_VALUE_LENGTH),
            constraints.check_metadata(Request.blank(
                '/', headers=headers),
                'object').body)
    def test_check_metadata_count(self):
        headers = {}
        for x in xrange(constraints.MAX_META_COUNT):
            headers['X-Object-Meta-%d' % x] = 'v'
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), None)
        headers['X-Object-Meta-Too-Many'] = 'v'
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
    def test_check_metadata_size(self):
        headers = {}
        size = 0
        chunk = constraints.MAX_META_NAME_LENGTH + \
            constraints.MAX_META_VALUE_LENGTH
        x = 0
        while size + chunk < constraints.MAX_META_OVERALL_SIZE:
            headers['X-Object-Meta-%04d%s' %
                    (x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
                'v' * constraints.MAX_META_VALUE_LENGTH
            size += chunk
            x += 1
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object'), None)
        # add two more headers in case adding just one falls exactly on the
        # limit (eg one header adds 1024 and the limit is 2048)
        headers['X-Object-Meta-%04d%s' %
                (x, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
            'v' * constraints.MAX_META_VALUE_LENGTH
        headers['X-Object-Meta-%04d%s' %
                (x + 1, 'a' * (constraints.MAX_META_NAME_LENGTH - 4))] = \
            'v' * constraints.MAX_META_VALUE_LENGTH
        self.assertEquals(constraints.check_metadata(Request.blank(
            '/', headers=headers), 'object').status_int, HTTP_BAD_REQUEST)
    def test_check_object_creation_content_length(self):
        headers = {'Content-Length': str(constraints.MAX_FILE_SIZE),
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name'), None)
        headers = {'Content-Length': str(constraints.MAX_FILE_SIZE + 1),
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name').status_int,
            HTTP_REQUEST_ENTITY_TOO_LARGE)
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name'), None)
        headers = {'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name').status_int,
            HTTP_LENGTH_REQUIRED)
    def test_check_object_creation_name_length(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        name = 'o' * constraints.MAX_OBJECT_NAME_LENGTH
        self.assertEquals(constraints.check_object_creation(Request.blank(
            '/', headers=headers), name), None)
        name = 'o' * (constraints.MAX_OBJECT_NAME_LENGTH + 1)
        self.assertEquals(constraints.check_object_creation(
            Request.blank('/', headers=headers), name).status_int,
            HTTP_BAD_REQUEST)
    def test_check_object_creation_content_type(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': 'text/plain'}
        self.assertEquals(constraints.check_object_creation(Request.blank(
            '/', headers=headers), 'object_name'), None)
        headers = {'Transfer-Encoding': 'chunked'}
        self.assertEquals(constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name').status_int,
            HTTP_BAD_REQUEST)
    def test_check_object_creation_bad_content_type(self):
        headers = {'Transfer-Encoding': 'chunked',
                   'Content-Type': '\xff\xff'}
        resp = constraints.check_object_creation(
            Request.blank('/', headers=headers), 'object_name')
        self.assertEquals(resp.status_int, HTTP_BAD_REQUEST)
        self.assert_('Content-Type' in resp.body)
    def test_check_mount(self):
        self.assertFalse(constraints.check_mount('', ''))
        with mock.patch("swift.common.constraints.ismount", MockTrue()):
            self.assertTrue(constraints.check_mount('/srv', '1'))
            self.assertTrue(constraints.check_mount('/srv', 'foo-bar'))
            self.assertTrue(constraints.check_mount(
                '/srv', '003ed03c-242a-4b2f-bee9-395f801d1699'))
            self.assertFalse(constraints.check_mount('/srv', 'foo bar'))
            self.assertFalse(constraints.check_mount('/srv', 'foo/bar'))
            self.assertFalse(constraints.check_mount('/srv', 'foo?bar'))
    def test_check_float(self):
        self.assertFalse(constraints.check_float(''))
        self.assertTrue(constraints.check_float('0'))
    def test_check_utf8(self):
        unicode_sample = u'\uc77c\uc601'
        valid_utf8_str = unicode_sample.encode('utf-8')
        invalid_utf8_str = unicode_sample.encode('utf-8')[::-1]
        unicode_with_null = u'abc\u0000def'
        utf8_with_null = unicode_with_null.encode('utf-8')
        for false_argument in [None,
                               '',
                               invalid_utf8_str,
                               unicode_with_null,
                               utf8_with_null]:
            self.assertFalse(constraints.check_utf8(false_argument))
        for true_argument in ['this is ascii and utf-8, too',
                              unicode_sample,
                              valid_utf8_str]:
            self.assertTrue(constraints.check_utf8(true_argument))
    def test_validate_bad_meta(self):
        req = Request.blank(
            '/v/a/c/o',
            headers={'x-object-meta-hello':
                     'ab' * constraints.MAX_HEADER_SIZE})
        self.assertEquals(constraints.check_metadata(req, 'object').status_int,
                          HTTP_BAD_REQUEST)
        self.assertIn('x-object-meta-hello', constraints.check_metadata(req,
                      'object').body.lower())
    def test_validate_constraints(self):
        c = constraints
        self.assertTrue(c.MAX_META_OVERALL_SIZE > c.MAX_META_NAME_LENGTH)
        self.assertTrue(c.MAX_META_OVERALL_SIZE > c.MAX_META_VALUE_LENGTH)
        self.assertTrue(c.MAX_HEADER_SIZE > c.MAX_META_NAME_LENGTH)
        self.assertTrue(c.MAX_HEADER_SIZE > c.MAX_META_VALUE_LENGTH)
    def test_validate_copy_from(self):
        req = Request.blank(
            '/v/a/c/o',
            headers={'x-copy-from': 'c/o2'})
        src_cont, src_obj = constraints.check_copy_from_header(req)
        self.assertEqual(src_cont, 'c')
        self.assertEqual(src_obj, 'o2')
        req = Request.blank(
            '/v/a/c/o',
            headers={'x-copy-from': 'c/subdir/o2'})
        src_cont, src_obj = constraints.check_copy_from_header(req)
        self.assertEqual(src_cont, 'c')
        self.assertEqual(src_obj, 'subdir/o2')
        req = Request.blank(
            '/v/a/c/o',
            headers={'x-copy-from': '/c/o2'})
        src_cont, src_obj = constraints.check_copy_from_header(req)
        self.assertEqual(src_cont, 'c')
        self.assertEqual(src_obj, 'o2')
    def test_validate_bad_copy_from(self):
        req = Request.blank(
            '/v/a/c/o',
            headers={'x-copy-from': 'bad_object'})
        self.assertRaises(HTTPException,
                          constraints.check_copy_from_header, req)
class TestConstraintsConfig(unittest.TestCase):
    def test_default_constraints(self):
        for key in constraints.DEFAULT_CONSTRAINTS:
            # if there are local overrides in swift.conf we just continue on
            if key in constraints.OVERRIDE_CONSTRAINTS:
                continue
            # module level attrs (that aren't in OVERRIDE) should have the
            # same value as the DEFAULT map
            module_level_value = getattr(constraints, key.upper())
            self.assertEquals(constraints.DEFAULT_CONSTRAINTS[key],
                              module_level_value)
    def test_effective_constraints(self):
        for key in constraints.DEFAULT_CONSTRAINTS:
            # module level attrs should always mirror the same value as the
            # EFFECTIVE map
            module_level_value = getattr(constraints, key.upper())
            self.assertEquals(constraints.EFFECTIVE_CONSTRAINTS[key],
                              module_level_value)
            # if there are local overrides in swift.conf those should be
            # reflected in the EFFECTIVE, otherwise we expect the DEFAULTs
            self.assertEquals(constraints.EFFECTIVE_CONSTRAINTS[key],
                              constraints.OVERRIDE_CONSTRAINTS.get(
                                  key, constraints.DEFAULT_CONSTRAINTS[key]))
    def test_override_constraints(self):
        try:
            with tempfile.NamedTemporaryFile() as f:
                f.write('[swift-constraints]\n')
                # set everything to 1
                for key in constraints.DEFAULT_CONSTRAINTS:
                    f.write('%s = 1\n' % key)
                f.flush()
                with mock.patch.object(constraints, 'SWIFT_CONF_FILE',
                                       f.name):
                    constraints.reload_constraints()
            for key in constraints.DEFAULT_CONSTRAINTS:
                # module level attrs should all be 1
                module_level_value = getattr(constraints, key.upper())
                self.assertEquals(module_level_value, 1)
                # all keys should be in OVERRIDE
                self.assertEquals(constraints.OVERRIDE_CONSTRAINTS[key],
                                  module_level_value)
                # module level attrs should always mirror the same value as
                # the EFFECTIVE map
                self.assertEquals(constraints.EFFECTIVE_CONSTRAINTS[key],
                                  module_level_value)
        finally:
            constraints.reload_constraints()
    def test_reload_reset(self):
        try:
            with tempfile.NamedTemporaryFile() as f:
                f.write('[swift-constraints]\n')
                # set everything to 1
                for key in constraints.DEFAULT_CONSTRAINTS:
                    f.write('%s = 1\n' % key)
                f.flush()
                with mock.patch.object(constraints, 'SWIFT_CONF_FILE',
                                       f.name):
                    constraints.reload_constraints()
            self.assertTrue(constraints.SWIFT_CONSTRAINTS_LOADED)
            self.assertEquals(sorted(constraints.DEFAULT_CONSTRAINTS.keys()),
                              sorted(constraints.OVERRIDE_CONSTRAINTS.keys()))
            # file is now deleted...
            with mock.patch.object(constraints, 'SWIFT_CONF_FILE',
                                   f.name):
                constraints.reload_constraints()
            # no constraints have been loaded from non-existent swift.conf
            self.assertFalse(constraints.SWIFT_CONSTRAINTS_LOADED)
            # no constraints are in OVERRIDE
            self.assertEquals([], constraints.OVERRIDE_CONSTRAINTS.keys())
            # the EFFECTIVE constraints mirror DEFAULT
            self.assertEquals(constraints.EFFECTIVE_CONSTRAINTS,
                              constraints.DEFAULT_CONSTRAINTS)
        finally:
            constraints.reload_constraints()
if __name__ == '__main__':
    unittest.main()
 | |
| 
	from numpy import ascontiguousarray, atleast_1d, atleast_2d, sqrt, std
from ..cov import EyeCov, LinearCov, SumCov
from ..lik import BernoulliProdLik, BinomialProdLik, PoissonProdLik
from ..link import LogitLink, LogLink
from ..mean import LinearMean, OffsetMean, SumMean
from ._ggp import GGPSampler
def bernoulli_sample(
    offset,
    G,
    heritability=0.5,
    causal_variants=None,
    causal_variance=0,
    random_state=None,
):
    r"""Bernoulli likelihood sampling.
    Sample according to
    .. math::
        \mathbf y \sim \prod_{i=1}^n
        \text{Bernoulli}(\mu_i = \text{logit}(z_i))
        \mathcal N(~ o \mathbf 1 + \mathbf a^\intercal \boldsymbol\alpha;
        ~ (h^2 - v_c)\mathrm G^\intercal\mathrm G +
        (1-h^2-v_c)\mathrm I ~)
    using the canonical Logit link function to define the conditional Bernoulli
    mean :math:`\mu_i`.
    The causal :math:`\mathbf a` covariates and the corresponding effect-sizes
    are randomly drawn according to the following idea. The ``causal_variants``,
    if given, are first mean-zero and std-one normalized and then have their
    elements divided by the square root of the number of variants::
        causal_variants = _stdnorm(causal_variants, axis=0)
        causal_variants /= sqrt(causal_variants.shape[1])
    The causal effect-sizes :math:`\boldsymbol\alpha` are drawn from
    :math:`\{-1, +1\}` and subsequently normalized for mean-zero and std-one.
    Parameters
    ----------
    random_state : random_state
        Set the initial random state.
    Example
    -------
    .. doctest::
        >>> from glimix_core.random import bernoulli_sample
        >>> from numpy.random import RandomState
        >>> offset = 5
        >>> G = [[1, -1], [2, 1]]
        >>> bernoulli_sample(offset, G, random_state=RandomState(0))
        array([1., 1.])
    """
    link = LogitLink()
    mean, cov = _mean_cov(
        offset, G, heritability, causal_variants, causal_variance, random_state
    )
    lik = BernoulliProdLik(link)
    sampler = GGPSampler(lik, mean, cov)
    return sampler.sample(random_state)
def binomial_sample(
    ntrials,
    offset,
    G,
    heritability=0.5,
    causal_variants=None,
    causal_variance=0,
    random_state=None,
):
    """Binomial likelihood sampling.
    Parameters
    ----------
    random_state : random_state
        Set the initial random state.
    Example
    -------
    .. doctest::
        >>> from glimix_core.random import binomial_sample
        >>> from numpy.random import RandomState
        >>> ntrials = [5, 15]
        >>> offset = 0.5
        >>> G = [[1, -1], [2, 1]]
        >>> binomial_sample(ntrials, offset, G, random_state=RandomState(0))
        array([ 2., 14.])
    """
    link = LogitLink()
    mean, cov = _mean_cov(
        offset, G, heritability, causal_variants, causal_variance, random_state
    )
    lik = BinomialProdLik(ntrials, link)
    sampler = GGPSampler(lik, mean, cov)
    return sampler.sample(random_state)
def poisson_sample(
    offset,
    G,
    heritability=0.5,
    causal_variants=None,
    causal_variance=0,
    random_state=None,
):
    """Poisson likelihood sampling.
    Parameters
    ----------
    random_state : random_state
        Set the initial random state.
    Example
    -------
    .. doctest::
        >>> from glimix_core.random import poisson_sample
        >>> from numpy.random import RandomState
        >>> offset = -0.5
        >>> G = [[0.5, -1], [2, 1]]
        >>> poisson_sample(offset, G, random_state=RandomState(0))
        array([0, 6])
    """
    mean, cov = _mean_cov(
        offset, G, heritability, causal_variants, causal_variance, random_state
    )
    link = LogLink()
    lik = PoissonProdLik(link)
    sampler = GGPSampler(lik, mean, cov)
    return sampler.sample(random_state)
def _causal_mean(causal_variants, causal_variance, random):
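    # Descriptive note (added comment): the causal variants are standardized and
    # scaled by 1/sqrt(p); the +/-1 effect sizes are normalized to unit std,
    # scaled by sqrt(causal_variance), and mean-centered before being attached
    # to a LinearMean term.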
    causal_variants = atleast_2d(atleast_1d(causal_variants).T).T
    causal_variants = _stdnorm(causal_variants, axis=0)
    causal_variants /= sqrt(causal_variants.shape[1])
    p = causal_variants.shape[1]
    directions = random.randn(p)
    directions[directions < 0.5] = -1
    directions[directions >= 0.5] = +1
    s = std(directions)
    if s > 0:
        directions /= s
    directions *= sqrt(causal_variance)
    directions -= directions.mean()
    mean = LinearMean(causal_variants)
    mean.effsizes = directions
    return mean
def _mean_cov(offset, G, heritability, causal_variants, causal_variance, random_state):
    G = ascontiguousarray(G, dtype=float)
    nsamples = G.shape[0]
    G = _stdnorm(G, axis=0)
    G /= sqrt(G.shape[1])
    mean1 = OffsetMean(nsamples)
    mean1.offset = offset
    cov1 = LinearCov(G)
    cov2 = EyeCov(nsamples)
    cov = SumCov([cov1, cov2])
    cov1.scale = heritability - causal_variance
    cov2.scale = 1 - heritability - causal_variance
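    # Variance decomposition (added comment): the genetic term G G^T gets scale
    # h^2 - v_c and the identity noise gets 1 - h^2 - v_c, matching the normal
    # covariance in the bernoulli_sample docstring; the causal variance v_c is
    # carried by the LinearMean term added below when causal_variants is given.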
    means = []
    means.append(mean1)
    if causal_variants is not None:
        means.append(_causal_mean(causal_variants, causal_variance, random_state))
    mean = SumMean(means)
    return mean, cov
def _stdnorm(X, axis=None, out=None):
    X = ascontiguousarray(X, dtype=float)
    if out is None:
        out = X.copy()
    m = out.mean(axis)
    s = out.std(axis)
    ok = s > 0
    out -= m
    out[..., ok] /= s[ok]
    return out
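# Illustrative usage sketch for _stdnorm (added, not part of the original module):
# each column is shifted to mean zero and, where its std is positive, scaled to
# unit std; constant columns are only centered.
#
#     >>> from numpy import array
#     >>> X = array([[1.0, 5.0], [3.0, 5.0]])
#     >>> _stdnorm(X, axis=0)
#     array([[-1.,  0.],
#            [ 1.,  0.]])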
 | |
| 
	#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> a = sc.accumulator(1)
>>> a.value
1
>>> a.value = 2
>>> a.value
2
>>> a += 5
>>> a.value
7
>>> sc.accumulator(1.0).value
1.0
>>> sc.accumulator(1j).value
1j
>>> rdd = sc.parallelize([1,2,3])
>>> def f(x):
...     global a
...     a += x
>>> rdd.foreach(f)
>>> a.value
13
>>> b = sc.accumulator(0)
>>> def g(x):
...     b.add(x)
>>> rdd.foreach(g)
>>> b.value
6
>>> from pyspark.accumulators import AccumulatorParam
>>> class VectorAccumulatorParam(AccumulatorParam):
...     def zero(self, value):
...         return [0.0] * len(value)
...     def addInPlace(self, val1, val2):
...         for i in range(len(val1)):
...              val1[i] += val2[i]
...         return val1
>>> va = sc.accumulator([1.0, 2.0, 3.0], VectorAccumulatorParam())
>>> va.value
[1.0, 2.0, 3.0]
>>> def g(x):
...     global va
...     va += [x] * 3
>>> rdd.foreach(g)
>>> va.value
[7.0, 8.0, 9.0]
>>> rdd.map(lambda x: a.value).collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
    ...
Py4JJavaError:...
>>> def h(x):
...     global a
...     a.value = 7
>>> rdd.foreach(h) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
    ...
Py4JJavaError:...
>>> sc.accumulator([1.0, 2.0, 3.0]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
    ...
TypeError:...
"""
import sys
import select
import struct
if sys.version < '3':
    import SocketServer
else:
    import socketserver as SocketServer
import threading
from pyspark.cloudpickle import CloudPickler
from pyspark.serializers import read_int, PickleSerializer
__all__ = ['Accumulator', 'AccumulatorParam']
pickleSer = PickleSerializer()
# Holds accumulators registered on the current machine, keyed by ID. This is then used to send
# the local accumulator updates back to the driver program at the end of a task.
_accumulatorRegistry = {}
def _deserialize_accumulator(aid, zero_value, accum_param):
    from pyspark.accumulators import _accumulatorRegistry
    accum = Accumulator(aid, zero_value, accum_param)
    accum._deserialized = True
    _accumulatorRegistry[aid] = accum
    return accum
class Accumulator(object):
    """
    A shared variable that can be accumulated, i.e., has a commutative and associative "add"
    operation. Worker tasks on a Spark cluster can add values to an Accumulator with the C{+=}
    operator, but only the driver program is allowed to access its value, using C{value}.
    Updates from the workers get propagated automatically to the driver program.
    While C{SparkContext} supports accumulators for primitive data types like C{int} and
    C{float}, users can also define accumulators for custom types by providing a custom
    L{AccumulatorParam} object. Refer to the doctest of this module for an example.
    """
    def __init__(self, aid, value, accum_param):
        """Create a new Accumulator with a given initial value and AccumulatorParam object"""
        from pyspark.accumulators import _accumulatorRegistry
        self.aid = aid
        self.accum_param = accum_param
        self._value = value
        self._deserialized = False
        _accumulatorRegistry[aid] = self
    def __reduce__(self):
        """Custom serialization; saves the zero value from our AccumulatorParam"""
        param = self.accum_param
        return (_deserialize_accumulator, (self.aid, param.zero(self._value), param))
    @property
    def value(self):
        """Get the accumulator's value; only usable in driver program"""
        if self._deserialized:
            raise Exception("Accumulator.value cannot be accessed inside tasks")
        return self._value
    @value.setter
    def value(self, value):
        """Sets the accumulator's value; only usable in driver program"""
        if self._deserialized:
            raise Exception("Accumulator.value cannot be accessed inside tasks")
        self._value = value
    def add(self, term):
        """Adds a term to this accumulator's value"""
        self._value = self.accum_param.addInPlace(self._value, term)
    def __iadd__(self, term):
        """The += operator; adds a term to this accumulator's value"""
        self.add(term)
        return self
    def __str__(self):
        return str(self._value)
    def __repr__(self):
        return "Accumulator<id=%i, value=%s>" % (self.aid, self._value)
class AccumulatorParam(object):
    """
    Helper object that defines how to accumulate values of a given type.
    """
    def zero(self, value):
        """
        Provide a "zero value" for the type, compatible in dimensions with the
        provided C{value} (e.g., a zero vector)
        """
        raise NotImplementedError
    def addInPlace(self, value1, value2):
        """
        Add two values of the accumulator's data type, returning a new value;
        for efficiency, can also update C{value1} in place and return it.
        """
        raise NotImplementedError
class AddingAccumulatorParam(AccumulatorParam):
    """
    An AccumulatorParam that uses the + operator to add values. Designed for simple types
    such as integers, floats, and lists. Requires the zero value for the underlying type
    as a parameter.
    """
    def __init__(self, zero_value):
        self.zero_value = zero_value
    def zero(self, value):
        return self.zero_value
    def addInPlace(self, value1, value2):
        value1 += value2
        return value1
# Singleton accumulator params for some standard types
INT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0)
FLOAT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0)
COMPLEX_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0j)
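# Illustrative note (added, based on the module doctest above): a plain
# ``sc.accumulator(0)`` or ``sc.accumulator(0.0)`` ends up backed by one of the
# singleton AddingAccumulatorParam instances above, while custom element types
# supply their own AccumulatorParam, e.g.
#
#     va = sc.accumulator([1.0, 2.0, 3.0], VectorAccumulatorParam())
#     rdd.foreach(lambda x: va.add([x, x, x]))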
class _UpdateRequestHandler(SocketServer.StreamRequestHandler):
    """
    This handler will keep polling updates from the same socket until the
    server is shutdown.
    """
    def handle(self):
        from pyspark.accumulators import _accumulatorRegistry
        while not self.server.server_shutdown:
            # Poll every 1 second for new data -- don't block in case of shutdown.
            r, _, _ = select.select([self.rfile], [], [], 1)
            if self.rfile in r:
                num_updates = read_int(self.rfile)
                for _ in range(num_updates):
                    (aid, update) = pickleSer._read_with_length(self.rfile)
                    _accumulatorRegistry[aid] += update
                # Write a byte in acknowledgement
                self.wfile.write(struct.pack("!b", 1))
class AccumulatorServer(SocketServer.TCPServer):
    """
    A simple TCP server that intercepts shutdown() in order to interrupt
    our continuous polling on the handler.
    """
    server_shutdown = False
    def shutdown(self):
        self.server_shutdown = True
        SocketServer.TCPServer.shutdown(self)
        self.server_close()
def _start_update_server():
    """Start a TCP server to receive accumulator updates in a daemon thread, and returns it"""
    server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler)
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()
    return server
if __name__ == "__main__":
    import doctest
    doctest.testmod()
 | |
| 
	"""
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
#          Martin Billinger
#          Matthieu Perrot
#          Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
    """Estimate covariance matrix (using optional shrinkage).
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.
    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None or 'empirical': no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.
    Returns
    -------
    s : array, shape (n_features, n_features)
        Estimated covariance matrix.
    """
    shrinkage = "empirical" if shrinkage is None else shrinkage
    if isinstance(shrinkage, string_types):
        if shrinkage == 'auto':
            sc = StandardScaler()  # standardize features
            X = sc.fit_transform(X)
            s = sc.std_ * ledoit_wolf(X)[0] * sc.std_  # scale back
        elif shrinkage == 'empirical':
            s = empirical_covariance(X)
        else:
            raise ValueError('unknown shrinkage parameter')
    elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
        if shrinkage < 0 or shrinkage > 1:
            raise ValueError('shrinkage parameter must be between 0 and 1')
        s = shrunk_covariance(empirical_covariance(X), shrinkage)
    else:
        raise TypeError('shrinkage must be of string or float type')
    return s
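# Illustrative sketch (added, not part of the original module): the three shrinkage
# modes accepted by _cov, assuming X is an (n_samples, n_features) array.
#
#     s_empirical = _cov(X)        # same as shrinkage=None or 'empirical'
#     s_auto = _cov(X, 'auto')     # Ledoit-Wolf shrinkage on standardized data
#     s_fixed = _cov(X, 0.1)       # fixed shrinkage intensity in [0, 1]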
def _class_means(X, y):
    """Compute class means.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values.
    Returns
    -------
    means : array-like, shape (n_classes, n_features)
        Class means.
    """
    means = []
    classes = np.unique(y)
    for group in classes:
        Xg = X[y == group, :]
        means.append(Xg.mean(0))
    return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
    """Compute class covariance matrix.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Input data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values.
    priors : array-like, shape (n_classes,)
        Class priors.
    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None: no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.
    Returns
    -------
    cov : array-like, shape (n_features, n_features)
        Class covariance matrix.
    """
    classes = np.unique(y)
    covs = []
    for group in classes:
        Xg = X[y == group, :]
        covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
    return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
    """Linear Discriminant Analysis (LDA).
    A classifier with a linear decision boundary, generated by fitting class
    conditional densities to the data and using Bayes' rule.
    The model fits a Gaussian density to each class, assuming that all classes
    share the same covariance matrix.
    The fitted model can also be used to reduce the dimensionality of the input
    by projecting it to the most discriminative directions.
    Parameters
    ----------
    solver : string, optional
        Solver to use, possible values:
          - 'svd': Singular value decomposition (default). Does not compute the
                covariance matrix, therefore this solver is recommended for
                data with a large number of features.
          - 'lsqr': Least squares solution, can be combined with shrinkage.
          - 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
    shrinkage : string or float, optional
        Shrinkage parameter, possible values:
          - None: no shrinkage (default).
          - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
          - float between 0 and 1: fixed shrinkage parameter.
        Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
    priors : array, optional, shape (n_classes,)
        Class priors.
    n_components : int, optional
        Number of components (< n_classes - 1) for dimensionality reduction.
    store_covariance : bool, optional
        Additionally compute class covariance matrix (default False).
    tol : float, optional
        Threshold used for rank estimation in SVD solver.
    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_classes, n_features)
        Weight vector(s).
    intercept_ : array, shape (n_classes,)
        Intercept term.
    covariance_ : array-like, shape (n_features, n_features)
        Covariance matrix (shared by all classes).
    means_ : array-like, shape (n_classes, n_features)
        Class means.
    priors_ : array-like, shape (n_classes,)
        Class priors (sum to 1).
    scalings_ : array-like, shape (rank, n_classes - 1)
        Scaling of the features in the space spanned by the class centroids.
    xbar_ : array-like, shape (n_features,)
        Overall mean.
    classes_ : array-like, shape (n_classes,)
        Unique class labels.
    See also
    --------
    sklearn.qda.QDA: Quadratic discriminant analysis
    Notes
    -----
    The default solver is 'svd'. It can perform both classification and
    transform, and it does not rely on the calculation of the covariance
    matrix. This can be an advantage in situations where the number of features
    is large. However, the 'svd' solver cannot be used with shrinkage.
    The 'lsqr' solver is an efficient algorithm that only works for
    classification. It supports shrinkage.
    The 'eigen' solver is based on the optimization of the between class
    scatter to within class scatter ratio. It can be used for both
    classification and transform, and it supports shrinkage. However, the
    'eigen' solver needs to compute the covariance matrix, so it might not be
    suitable for situations with a high number of features.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.lda import LDA
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = LDA()
    >>> clf.fit(X, y)
    LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
      store_covariance=False, tol=0.0001)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    """
    def __init__(self, solver='svd', shrinkage=None, priors=None,
                 n_components=None, store_covariance=False, tol=1e-4):
        self.solver = solver
        self.shrinkage = shrinkage
        self.priors = priors
        self.n_components = n_components
        self.store_covariance = store_covariance  # used only in svd solver
        self.tol = tol  # used only in svd solver
    def _solve_lsqr(self, X, y, shrinkage):
        """Least squares solver.
        The least squares solver computes a straightforward solution of the
        optimal decision rule based directly on the discriminant functions. It
        can only be used for classification (with optional shrinkage), because
        estimation of eigenvectors is not performed. Therefore, dimensionality
        reduction with the transform is not supported.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_classes)
            Target values.
        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
              - None: no shrinkage (default).
              - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
              - float between 0 and 1: fixed shrinkage parameter.
        Notes
        -----
        This solver is based on [1]_, section 2.6.2, pp. 39-41.
        References
        ----------
        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
        self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
                           + np.log(self.priors_))
    def _solve_eigen(self, X, y, shrinkage):
        """Eigenvalue solver.
        The eigenvalue solver computes the optimal solution of the Rayleigh
        coefficient (basically the ratio of between class scatter to within
        class scatter). This solver supports both classification and
        dimensionality reduction (with optional shrinkage).
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        shrinkage : string or float, optional
            Shrinkage parameter, possible values:
              - None: no shrinkage (default).
              - 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
              - float between 0 and 1: fixed shrinkage constant.
        Notes
        -----
        This solver is based on [1]_, section 3.8.3, pp. 121-124.
        References
        ----------
        .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
           (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
           0-471-05669-3.
        """
        self.means_ = _class_means(X, y)
        self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
        Sw = self.covariance_  # within scatter
        St = _cov(X, shrinkage)  # total scatter
        Sb = St - Sw  # between scatter
        evals, evecs = linalg.eigh(Sb, Sw)
        evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors
        # evecs /= np.linalg.norm(evecs, axis=0)  # doesn't work with numpy 1.6
        evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
        self.scalings_ = evecs
        self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
        self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
                           + np.log(self.priors_))
    def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
        """SVD solver.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.
        store_covariance : bool, optional
            Additionally compute class covariance matrix (default False).
        tol : float, optional
            Threshold used for rank estimation.
        """
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        self.means_ = _class_means(X, y)
        if store_covariance:
            self.covariance_ = _class_cov(X, y, self.priors_)
        Xc = []
        for idx, group in enumerate(self.classes_):
            Xg = X[y == group, :]
            Xc.append(Xg - self.means_[idx])
        self.xbar_ = np.dot(self.priors_, self.means_)
        Xc = np.concatenate(Xc, axis=0)
        # 1) within-class (univariate) scaling by the within-class std-dev
        std = Xc.std(axis=0)
        # avoid division by zero in normalization
        std[std == 0] = 1.
        fac = 1. / (n_samples - n_classes)
        # 2) Within variance scaling
        X = np.sqrt(fac) * (Xc / std)
        # SVD of centered (within)scaled data
        U, S, V = linalg.svd(X, full_matrices=False)
        rank = np.sum(S > tol)
        if rank < n_features:
            warnings.warn("Variables are collinear.")
        # Scaling of within covariance is: V' 1/S
        scalings = (V[:rank] / std).T / S[:rank]
        # 3) Between variance scaling
        # Scale weighted centers
        X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
                    (self.means_ - self.xbar_).T).T, scalings)
        # Centers are living in a space with n_classes-1 dim (maximum)
        # Use SVD to find projection in the space spanned by the
        # (n_classes) centers
        _, S, V = linalg.svd(X, full_matrices=0)
        rank = np.sum(S > tol * S[0])
        self.scalings_ = np.dot(scalings, V.T[:, :rank])
        coef = np.dot(self.means_ - self.xbar_, self.scalings_)
        self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
                           + np.log(self.priors_))
        self.coef_ = np.dot(coef, self.scalings_.T)
        self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
    def fit(self, X, y, store_covariance=False, tol=1.0e-4):
        """Fit LDA model according to the given training data and parameters.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.
        y : array, shape (n_samples,)
            Target values.
        """
        if store_covariance:
            warnings.warn("'store_covariance' was moved to the __init__()"
                          "method in version 0.16 and will be removed from"
                          "fit() in version 0.18.", DeprecationWarning)
        else:
            store_covariance = self.store_covariance
        if tol != 1.0e-4:
            warnings.warn("'tol' was moved to __init__() method in version"
                          " 0.16 and will be removed from fit() in 0.18",
                          DeprecationWarning)
            self.tol = tol
        X, y = check_X_y(X, y)
        self.classes_ = unique_labels(y)
        if self.priors is None:  # estimate priors from sample
            _, y_t = np.unique(y, return_inverse=True)  # non-negative ints
            self.priors_ = np.bincount(y_t) / float(len(y))
        else:
            self.priors_ = self.priors
        if self.solver == 'svd':
            if self.shrinkage is not None:
                raise NotImplementedError('shrinkage not supported')
            self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
        elif self.solver == 'lsqr':
            self._solve_lsqr(X, y, shrinkage=self.shrinkage)
        elif self.solver == 'eigen':
            self._solve_eigen(X, y, shrinkage=self.shrinkage)
        else:
            raise ValueError("unknown solver {} (valid solvers are 'svd', "
                             "'lsqr', and 'eigen').".format(self.solver))
        if self.classes_.size == 2:  # treat binary case as a special case
            self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
            self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
                                       ndmin=1)
        return self
    def transform(self, X):
        """Project data to maximize class separation.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Transformed data.
        """
        check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
        X = check_array(X)
        if self.solver == 'lsqr':
            raise NotImplementedError("transform not implemented for 'lsqr' "
                                      "solver (use 'svd' or 'eigen').")
        elif self.solver == 'svd':
            X_new = np.dot(X - self.xbar_, self.scalings_)
        elif self.solver == 'eigen':
            X_new = np.dot(X, self.scalings_)
        n_components = X.shape[1] if self.n_components is None \
            else self.n_components
        return X_new[:, :n_components]
    def predict_proba(self, X):
        """Estimate probability.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated probabilities.
        """
        prob = self.decision_function(X)
        prob *= -1
        np.exp(prob, prob)
        prob += 1
        np.reciprocal(prob, prob)
        if len(self.classes_) == 2:  # binary case
            return np.column_stack([1 - prob, prob])
        else:
            # OvR normalization, like LibLinear's predict_probability
            prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
            return prob
    def predict_log_proba(self, X):
        """Estimate log probability.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated log probabilities.
        """
        return np.log(self.predict_proba(X))
 | |
| 
	from js9 import j
def get_stats_collector(service):
    stats_collectors_services = service.consumers.get('stats_collector')
    if stats_collectors_services:
        return stats_collectors_services[0]
def get_statsdb(service):
    statsdb_services = service.aysrepo.servicesFind(role='statsdb')
    if statsdb_services:
        return statsdb_services[0]
def get_version(job):
    from zeroos.orchestrator.sal.Node import Node
    from zeroos.orchestrator.configuration import get_jwt_token
    service = job.service
    if service.model.data.status != 'running':
        version = ''
    else:
        node = Node.from_ays(service, get_jwt_token(job.service.aysrepo))
        pong = node.client.ping()
        version = pong.split('Version: ')[1] if pong else ''
    service.model.data.version = version
    service.saveAll()
    return version
def input(job):
    from zeroos.orchestrator.sal.Node import Node
    from zeroos.orchestrator.configuration import get_configuration, get_jwt_token
    args = job.model.args
    ip = args.get('redisAddr')
    node = Node(ip, args.get('redisPort'), get_jwt_token(job.service.aysrepo))
    config = get_configuration(job.service.aysrepo)
    version = node.client.info.version()
    core0_version = config.get('0-core-version')
    core0_revision = config.get('0-core-revision')
    if (core0_version and core0_version != version['branch']) or \
            (core0_revision and core0_revision != version['revision']):
        raise RuntimeError(
            'Node with IP {} has a wrong version. Found version {}@{} and expected version {}@{} '.format(
                ip, version['branch'], version['revision'], core0_version, core0_revision))
def init(job):
    from zeroos.orchestrator.sal.Node import Node
    from zeroos.orchestrator.configuration import get_jwt_token
    service = job.service
    node = Node.from_ays(service, get_jwt_token(service.aysrepo))
    job.logger.info('create storage pool for fuse cache')
    poolname = '{}_fscache'.format(service.name)
    storagepool = node.ensure_persistance(poolname)
    storagepool.ays.create(service.aysrepo)
    statsdb_service = get_statsdb(service)
    if statsdb_service:
        stats_collector_actor = service.aysrepo.actorGet('stats_collector')
        args = {
            'node': service.name,
            'port': statsdb_service.model.data.port,
            'ip': statsdb_service.parent.model.data.redisAddr,
        }
        stats_collector_service = stats_collector_actor.serviceCreate(instance=service.name, args=args)
        stats_collector_service.consume(service)
def getAddresses(job):
    service = job.service
    networks = service.producers.get('network', [])
    networkmap = {}
    for network in networks:
        networkmap[network.name] = network.executeAction('getAddresses', args={'node_name': service.name})
    return networkmap
def install(job):
    from zeroos.orchestrator.sal.Node import Node
    from zeroos.orchestrator.configuration import get_jwt_token
    job.context['token'] = get_jwt_token(job.service.aysrepo)
    # at each boot recreate the complete state in the system
    service = job.service
    node = Node.from_ays(service, get_jwt_token(job.service.aysrepo))
    get_version(job)
    job.logger.info('mount storage pool for fuse cache')
    poolname = '{}_fscache'.format(service.name)
    node.ensure_persistance(poolname)
    # Set host name
    node.client.system('hostname %s' % service.model.data.hostname).get()
    node.client.bash('echo %s > /etc/hostname' % service.model.data.hostname).get()
    job.logger.info('configure networks')
    for network in service.producers.get('network', []):
        network.executeAction('configure', args={'node_name': service.name})
    stats_collector_service = get_stats_collector(service)
    statsdb_service = get_statsdb(service)
    if stats_collector_service and statsdb_service and statsdb_service.model.data.status == 'running':
        stats_collector_service.executeAction('install', context=job.context)
    node.client.bash('modprobe ipmi_si && modprobe ipmi_devintf').get()
def monitor(job):
    from zeroos.orchestrator.sal.Node import Node
    from zeroos.orchestrator.sal.healthcheck import HealthCheckObject
    from zeroos.orchestrator.configuration import get_jwt_token, get_configuration
    service = job.service
    config = get_configuration(service.aysrepo)
    token = get_jwt_token(job.service.aysrepo)
    job.context['token'] = token
    install_action = service.model.actionsState['install']
    if install_action != 'ok' and install_action != 'error':
        return
    healthcheck_service = job.service.aysrepo.serviceGet(role='healthcheck',
                                                         instance='node_%s' % service.name,
                                                         die=False)
    if healthcheck_service is None:
        healthcheck_actor = service.aysrepo.actorGet('healthcheck')
        healthcheck_service = healthcheck_actor.serviceCreate(instance='node_%s' % service.name)
        service.consume(healthcheck_service)
    nodestatus = HealthCheckObject('nodestatus', 'Node Status', 'Node Status', '/nodes/{}'.format(service.name))
    node = Node.from_ays(service, token, timeout=5)
    state = node.is_running()
    if state:
        service.model.data.status = 'running'
        configured = node.is_configured(service.name)
        if not configured:
            service.executeAction('install', context=job.context)
            for consumer in service.getConsumersRecursive():
                consumer.self_heal_action('monitor')
        stats_collector_service = get_stats_collector(service)
        statsdb_service = get_statsdb(service)
        # Check if statsdb is installed on this node and start it if needed
        if (statsdb_service and str(statsdb_service.parent) == str(job.service)
                and statsdb_service.model.data.status != 'running'):
            statsdb_service.executeAction('start', context=job.context)
        # Check if there is a running statsdb and if so make sure stats_collector for this node is started
        if (stats_collector_service and stats_collector_service.model.data.status != 'running'
                and statsdb_service.model.data.status == 'running'):
            stats_collector_service.executeAction('start', context=job.context)
        # healthchecks
        nodestatus.add_message('node', 'OK', 'Node is running')
        update_healthcheck(job, healthcheck_service, node.healthcheck.openfiledescriptors())
        update_healthcheck(job, healthcheck_service, node.healthcheck.cpu_mem())
        update_healthcheck(job, healthcheck_service, node.healthcheck.rotate_logs())
        update_healthcheck(job, healthcheck_service, node.healthcheck.network_bond())
        update_healthcheck(job, healthcheck_service, node.healthcheck.interrupts())
        update_healthcheck(job, healthcheck_service, node.healthcheck.context_switch())
        update_healthcheck(job, healthcheck_service, node.healthcheck.threads())
        update_healthcheck(job, healthcheck_service, node.healthcheck.qemu_vm_logs())
        update_healthcheck(job, healthcheck_service, node.healthcheck.network_load())
        update_healthcheck(job, healthcheck_service, node.healthcheck.disk_usage())
        update_healthcheck(job, healthcheck_service, node.healthcheck.ssh_cleanup(job=job))
        flist = config.get('healthcheck-flist', 'https://hub.gig.tech/gig-official-apps/healthcheck.flist')
        with node.healthcheck.with_container(flist) as cont:
            update_healthcheck(job, healthcheck_service, node.healthcheck.node_temperature(cont))
            update_healthcheck(job, healthcheck_service, node.healthcheck.powersupply(cont))
            update_healthcheck(job, healthcheck_service, node.healthcheck.fan(cont))
        # TODO: check network stability of node with the rest of the nodes
    else:
        if service.model.data.status != 'rebooting':
            service.model.data.status = 'halted'
            nodestatus.add_message('node', 'ERROR', 'Node is halted')
    update_healthcheck(job, healthcheck_service, nodestatus.to_dict())
    get_version(job)
    service.saveAll()
def update_healthcheck(job, health_service, healthchecks):
    import time
    service = job.service
    interval = service.model.actionGet('monitor').period
    new_healthchecks = list()
    if not isinstance(healthchecks, list):
        healthchecks = [healthchecks]
    defaultresource = '/nodes/{}'.format(service.name)
    for health_check in healthchecks:
        for health in health_service.model.data.healthchecks:
            # If this healthcheck already exists, update its attributes
            if health.id == health_check['id']:
                health.name = health_check.get('name', '')
                health.resource = health_check.get('resource', defaultresource) or defaultresource
                health.messages = health_check.get('messages', [])
                health.category = health_check.get('category', '')
                health.lasttime = time.time()
                health.interval = interval
                health.stacktrace = health_check.get('stacktrace', '')
                break
        else:
            # healthcheck doesn't exist in the current list, add it to the list of new
            health_check['lasttime'] = time.time()
            health_check['interval'] = interval
            new_healthchecks.append(health_check)
    old_healthchecks = health_service.model.data.to_dict().get('healthchecks', [])
    old_healthchecks.extend(new_healthchecks)
    health_service.model.data.healthchecks = old_healthchecks
def reboot(job):
    import time
    import redis
    from zeroos.orchestrator.sal.Node import Node
    from zeroos.orchestrator.configuration import get_jwt_token
    token = get_jwt_token(job.service.aysrepo)
    job.context['token'] = token
    service = job.service
    service._recurring_tasks['monitor'].stop()
    try:
        start = time.time()
        # Make sure any running monitor action finishes before we reboot
        while time.time() < start + 60:
            if not j.core.jobcontroller.db.jobs.list(
                    actor='node.zero-os', action='monitor', state='running', service=service.name):
                break
            time.sleep(1)
        else:
            raise j.exceptions.RuntimeError('Failed to reboot node. Waiting for monitoring action for too long')
        force_reboot = service.model.data.forceReboot
        vms = service.consumers.get('vm') or []
        for vm in vms:
            if vm.model.data.status != 'halted':
                if not force_reboot:
                    raise j.exceptions.RuntimeError(
                        'Failed to reboot node. Force reboot is not enabled and some vms are not halted')
                else:
                    vm.executeAction('shutdown', context=job.context)
        service.model.data.status = 'rebooting'
        job.logger.info('reboot node {}'.format(service))
        node = Node.from_ays(service, job.context['token'])
        node.client.raw('core.reboot', {})
    finally:
        start = time.time()
        while time.time() < start + 10:
            try:
                node = Node.from_ays(service, token, timeout=5)
                node.client.testConnectionAttempts = 0
                node.client.ping()
            except (RuntimeError, ConnectionError, redis.TimeoutError, TimeoutError):
                break
            time.sleep(1)
        else:
            job.logger.info("Could not wait within 10 seconds for node to reboot")
        service._recurring_tasks['monitor'].start()
def uninstall(job):
    from zeroos.orchestrator.configuration import get_jwt_token
    job.context['token'] = get_jwt_token(job.service.aysrepo)
    service = job.service
    stats_collector_service = get_stats_collector(service)
    if stats_collector_service:
        stats_collector_service.executeAction('uninstall', context=job.context)
    statsdb_service = get_statsdb(service)
    if statsdb_service and str(statsdb_service.parent) == str(service):
        statsdb_service.executeAction('uninstall', context=job.context)
    bootstraps = service.aysrepo.servicesFind(actor='bootstrap.zero-os')
    if bootstraps:
        bootstraps[0].executeAction('delete_node', args={'node_name': service.name})
    # Remove etcd_cluster if this was the last node service
    node_services = service.aysrepo.servicesFind(role='node')
    if len(node_services) > 1:
        return
    for etcd_cluster_service in service.aysrepo.servicesFind(role='etcd_cluster'):
        etcd_cluster_service.executeAction('delete', context=job.context)
        etcd_cluster_service.delete()
def watchdog(job):
    from zeroos.orchestrator.sal.Pubsub import Pubsub
    from zeroos.orchestrator.configuration import get_jwt_token
    from asyncio import sleep
    import asyncio
    import re
    import traceback
    service = job.service
    watched_roles = {
        'nbdserver': {
            'level': 20,
            'message': (re.compile('.*'),),
            'eof': True
        },
        'tlogserver': {
            'eof': True,
        },
        'ork': {
            'level': 20,
            'instance': job.service.name,
            'service': 'node',
            'eof': False,
            'message': (re.compile('.*'),),
            'handler': 'ork_handler',
        },
        'kvm': {
            'level': 20,
            'instance': job.service.name,
            'service': 'node',
            'eof': False,
            'message': (re.compile('.*'),),
            'handler': 'vm_handler',
            'sub_id': 'events',
        },
        'cloudinit': {
            'eof': True,
        },
        'http': {
            'eof': True,
        },
        'dhcp': {
            'eof': True,
        },
        'storage_engine': {
            'eof': True,
        },
        "etcd": {
            "eof": True,
        },
        'stats_collector': {
            'eof': True,
        },
        'zerostor': {
            'eof': True,
        },
        'container': {
            "eof": True,
        },
    }
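    # Example (added comment): a message published with job id 'nbdserver.vdisk1'
    # (hypothetical sub_id) is split below into role 'nbdserver' and sub_id
    # 'vdisk1'; it is only dispatched when the role is in watched_roles and the
    # optional 'level' and 'sub_id' filters match, using the role's 'handler'
    # (default 'watchdog_handler') on the matching service.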
    async def callback(jobid, level, message, flag):
        if '.' not in jobid:
            return
        role, sub_id = jobid.split('.', 1)
        if (role not in watched_roles or
                watched_roles[role].get('level', level) != level
                or watched_roles[role].get('sub_id', sub_id) != sub_id):
            return
        service_role = watched_roles[role].get('service', role)
        instance = watched_roles[role].get('instance', sub_id)
        eof = flag & 0x6 != 0
        valid_message = False
        matched_messages = watched_roles[role].get('message', ())
        for msg in matched_messages:
            if msg.match(message):
                valid_message = True
        if not valid_message and not (watched_roles[role]['eof'] and eof):
            return
        srv = service.aysrepo.serviceGet(role=service_role, instance=instance, die=False)
        if srv:
            args = {'message': message, 'eof': eof, 'level': level}
            job.context['token'] = get_jwt_token(job.service.aysrepo)
            handler = watched_roles[role].get('handler', 'watchdog_handler')
            await srv.asyncExecuteAction(handler, context=job.context, args=args)
    async def check_node(job):
        job.context['token'] = get_jwt_token(job.service.aysrepo)
        try:
            cl = Pubsub(service._loop, service.model.data.redisAddr, password=job.context['token'], callback=callback)
            await cl.ping()
            service.model.data.status = 'running'
        except (RuntimeError, OSError) as e:
            service.model.data.status = 'halted'
    async def streaming(job):
        # Check if the node is running
        while service.model.actionsState['install'] != 'ok':
            await sleep(5)
        while str(service.model.data.status) != 'running':
            await sleep(5)
        # Add the looping here instead of the pubsub sal
        cl = None
        subscribed = None
        while True:
            if str(service.model.data.status) != 'running':
                await sleep(5)
                continue
            if cl is None:
                job.context['token'] = get_jwt_token(job.service.aysrepo)
                cl = Pubsub(service._loop, service.model.data.redisAddr, password=job.context['token'], callback=callback)
            try:
                if not subscribed:
                    queue = await cl.subscribe('ays.monitor')
                    subscribed = True
                await cl.global_stream(queue)
            except asyncio.TimeoutError as e:
                job.logger.error(e)
                await check_node(job)
                cl = None
                subscribed = None
            except OSError as e:
                job.logger.error(e)
                await check_node(job)
                cl = None
                subscribed = None
            except RuntimeError as e:
                job.logger.error(e)
                await check_node(job)
                cl = None
                subscribed = None
            except Exception as e:
                job.logger.error(traceback.format_exc())
                await check_node(job)
                cl = None
                subscribed = None
    return streaming(job)
def nic_shutdown(job, message):
    from zeroos.orchestrator.sal.Node import Node
    from zeroos.orchestrator.configuration import get_jwt_token
    service = job.service
    node = Node.from_ays(service, get_jwt_token(service.aysrepo))
    interface = message['name']
    if interface.startswith('cont'):
        container_id = interface.split('-')[0].replace('cont', '')
        for container in node.containers.list():
            if str(container.id) == container_id:
                container_service = service.aysrepo.serviceGet(role='container', instance=container.name)
                container_service.model.data.status = 'networkKilled'
                container_service.saveAll()
                return
    else:
        vms = node.client.kvm.list()
        for vm in vms:
            if interface in vm['ifctargets']:
                vm_service = service.aysrepo.serviceGet(role='vm', instance=vm['name'])
                vm_service.model.data.status = 'networkKilled'
                vm_service.saveAll()
                return
    job.logger.info('Failed to find vm/container interface matching %s' % interface)
def ork_handler(job):
    import json
    from zeroos.orchestrator.utils import send_event
    message = job.model.args.get('message')
    if not message:
        return
    message = json.loads(message)
    send_event('ork', message, job.service.aysrepo)
    if message['event'] == 'NIC_SHUTDOWN':
        nic_shutdown(job, message)
    elif message['event'] == 'VM_QUARANTINE' and message['state'] == 'WARNING':
        job.logger.info('VM %s exceeded cpu threshold and will be quarantined soon' % message['name'])
    elif message['event'] == 'VM_QUARANTINE' and message['state'] == 'SUCCESS':
        job.logger.info('Vm %s has been quarantined' % message['name'])
    elif message['event'] == 'VM_UNQUARANTINE' and message['state'] == 'SUCCESS':
        job.logger.info('Vm %s has been released from quarantine' % message['name'])
def start_vm(job, vm):
    import asyncio
    from zeroos.orchestrator.configuration import get_jwt_token
    if vm.model.data.status == 'running':
        job.context['token'] = get_jwt_token(job.service.aysrepo)
        asyncio.ensure_future(vm.asyncExecuteAction('start', context=job.context), loop=job.service._loop)
def shutdown_vm(job, vm):
    import asyncio
    from zeroos.orchestrator.configuration import get_jwt_token
    if vm.model.data.status == 'running':
        job.context['token'] = get_jwt_token(job.service.aysrepo)
        asyncio.ensure_future(vm.asyncExecuteAction('shutdown', context=job.context), loop=job.service._loop)
def vm_handler(job):
    import json
    import asyncio
    message = job.model.args.get('message')
    if not message:
        return
    message = json.loads(message)
    vm = job.service.aysrepo.serviceGet(role='vm', instance=message['name'])
    if not vm:
        return
    if message['event'] == 'stopped' and message['detail'] == 'failed':
        # start_vm/shutdown_vm schedule the async action themselves, so they
        # are called directly rather than wrapped in asyncio.ensure_future.
        start_vm(job, vm)
    elif message['event'] == 'stopped' and message['detail'] == 'shutdown':
        shutdown_vm(job, vm)
def processChange(job):
    """Persist changes to the node's forceReboot flag."""
    service = job.service
    args = job.model.args
    node_data = service.model.data.to_dict()
    if 'forceReboot' in args and node_data.get('forceReboot') != args['forceReboot']:
        service.model.data.forceReboot = args['forceReboot']
        service.saveAll()
 | |
| 
	"""The tests for the manual_mqtt Alarm Control Panel component."""
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.setup import setup_component
from homeassistant.const import (
    STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_AWAY,
    STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED)
from homeassistant.components import alarm_control_panel
import homeassistant.util.dt as dt_util
from tests.common import (
    fire_time_changed, get_test_home_assistant,
    mock_mqtt_component, fire_mqtt_message, assert_setup_component)
CODE = 'HELLO_CODE'
class TestAlarmControlPanelManualMqtt(unittest.TestCase):
    """Test the manual_mqtt alarm module."""
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.mock_publish = mock_mqtt_component(self.hass)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop down everything that was started."""
        self.hass.stop()
    def test_fail_setup_without_state_topic(self):
        """Test for failing with no state topic."""
        with assert_setup_component(0) as config:
            assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
                alarm_control_panel.DOMAIN: {
                    'platform': 'manual_mqtt',
                    'command_topic': 'alarm/command'
                }
            })
            assert not config[alarm_control_panel.DOMAIN]
    def test_fail_setup_without_command_topic(self):
        """Test failing with no command topic."""
        with assert_setup_component(0):
            assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
                alarm_control_panel.DOMAIN: {
                    'platform': 'manual_mqtt',
                    'state_topic': 'alarm/state'
                }
            })
    def test_arm_home_no_pending(self):
        """Test arm home method."""
        self.assertTrue(setup_component(
            self.hass, alarm_control_panel.DOMAIN,
            {'alarm_control_panel': {
                'platform': 'manual_mqtt',
                'name': 'test',
                'code': CODE,
                'pending_time': 0,
                'disarm_after_trigger': False,
                'command_topic': 'alarm/command',
                'state_topic': 'alarm/state',
            }}))
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_arm_home(self.hass, CODE)
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_ARMED_HOME,
                         self.hass.states.get(entity_id).state)
    def test_arm_home_with_pending(self):
        """Test arm home method."""
        self.assertTrue(setup_component(
            self.hass, alarm_control_panel.DOMAIN,
            {'alarm_control_panel': {
                'platform': 'manual_mqtt',
                'name': 'test',
                'code': CODE,
                'pending_time': 1,
                'disarm_after_trigger': False,
                'command_topic': 'alarm/command',
                'state_topic': 'alarm/state',
            }}))
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_arm_home(self.hass, CODE, entity_id)
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_PENDING,
                         self.hass.states.get(entity_id).state)
        future = dt_util.utcnow() + timedelta(seconds=1)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_ARMED_HOME,
                         self.hass.states.get(entity_id).state)
    def test_arm_home_with_invalid_code(self):
        """Attempt to arm home without a valid code."""
        self.assertTrue(setup_component(
            self.hass, alarm_control_panel.DOMAIN,
            {'alarm_control_panel': {
                'platform': 'manual_mqtt',
                'name': 'test',
                'code': CODE,
                'pending_time': 1,
                'disarm_after_trigger': False,
                'command_topic': 'alarm/command',
                'state_topic': 'alarm/state',
            }}))
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_arm_home(self.hass, CODE + '2')
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
    def test_arm_away_no_pending(self):
        """Test arm home method."""
        self.assertTrue(setup_component(
            self.hass, alarm_control_panel.DOMAIN,
            {'alarm_control_panel': {
                'platform': 'manual_mqtt',
                'name': 'test',
                'code': CODE,
                'pending_time': 0,
                'disarm_after_trigger': False,
                'command_topic': 'alarm/command',
                'state_topic': 'alarm/state',
            }}))
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_arm_away(self.hass, CODE, entity_id)
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_ARMED_AWAY,
                         self.hass.states.get(entity_id).state)
    def test_arm_away_with_pending(self):
        """Test arm home method."""
        self.assertTrue(setup_component(
            self.hass, alarm_control_panel.DOMAIN,
            {'alarm_control_panel': {
                'platform': 'manual_mqtt',
                'name': 'test',
                'code': CODE,
                'pending_time': 1,
                'disarm_after_trigger': False,
                'command_topic': 'alarm/command',
                'state_topic': 'alarm/state',
            }}))
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_arm_away(self.hass, CODE)
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_PENDING,
                         self.hass.states.get(entity_id).state)
        future = dt_util.utcnow() + timedelta(seconds=1)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_ARMED_AWAY,
                         self.hass.states.get(entity_id).state)
    def test_arm_away_with_invalid_code(self):
        """Attempt to arm away without a valid code."""
        self.assertTrue(setup_component(
            self.hass, alarm_control_panel.DOMAIN,
            {'alarm_control_panel': {
                'platform': 'manual_mqtt',
                'name': 'test',
                'code': CODE,
                'pending_time': 1,
                'disarm_after_trigger': False,
                'command_topic': 'alarm/command',
                'state_topic': 'alarm/state',
            }}))
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_arm_away(self.hass, CODE + '2')
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
    def test_trigger_no_pending(self):
        """Test triggering when no pending submitted method."""
        self.assertTrue(setup_component(
            self.hass, alarm_control_panel.DOMAIN,
            {'alarm_control_panel': {
                'platform': 'manual_mqtt',
                'name': 'test',
                'trigger_time': 1,
                'disarm_after_trigger': False,
                'command_topic': 'alarm/command',
                'state_topic': 'alarm/state',
            }}))
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_PENDING,
                         self.hass.states.get(entity_id).state)
        future = dt_util.utcnow() + timedelta(seconds=60)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_TRIGGERED,
                         self.hass.states.get(entity_id).state)
    def test_trigger_with_pending(self):
        """Test arm home method."""
        self.assertTrue(setup_component(
            self.hass, alarm_control_panel.DOMAIN,
            {'alarm_control_panel': {
                'platform': 'manual_mqtt',
                'name': 'test',
                'pending_time': 2,
                'trigger_time': 3,
                'disarm_after_trigger': False,
                'command_topic': 'alarm/command',
                'state_topic': 'alarm/state',
            }}))
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_trigger(self.hass)
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_PENDING,
                         self.hass.states.get(entity_id).state)
        future = dt_util.utcnow() + timedelta(seconds=2)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_TRIGGERED,
                         self.hass.states.get(entity_id).state)
        future = dt_util.utcnow() + timedelta(seconds=5)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
    def test_trigger_with_disarm_after_trigger(self):
        """Test disarm after trigger."""
        self.assertTrue(setup_component(
            self.hass, alarm_control_panel.DOMAIN,
            {'alarm_control_panel': {
                'platform': 'manual_mqtt',
                'name': 'test',
                'trigger_time': 5,
                'pending_time': 0,
                'disarm_after_trigger': True,
                'command_topic': 'alarm/command',
                'state_topic': 'alarm/state',
            }}))
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_TRIGGERED,
                         self.hass.states.get(entity_id).state)
        future = dt_util.utcnow() + timedelta(seconds=5)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
    def test_disarm_while_pending_trigger(self):
        """Test disarming while pending state."""
        self.assertTrue(setup_component(
            self.hass, alarm_control_panel.DOMAIN,
            {'alarm_control_panel': {
                'platform': 'manual_mqtt',
                'name': 'test',
                'trigger_time': 5,
                'disarm_after_trigger': False,
                'command_topic': 'alarm/command',
                'state_topic': 'alarm/state',
            }}))
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_trigger(self.hass)
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_PENDING,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id)
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        future = dt_util.utcnow() + timedelta(seconds=5)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
    def test_disarm_during_trigger_with_invalid_code(self):
        """Test disarming while code is invalid."""
        self.assertTrue(setup_component(
            self.hass, alarm_control_panel.DOMAIN,
            {'alarm_control_panel': {
                'platform': 'manual_mqtt',
                'name': 'test',
                'pending_time': 5,
                'code': CODE + '2',
                'disarm_after_trigger': False,
                'command_topic': 'alarm/command',
                'state_topic': 'alarm/state',
            }}))
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_trigger(self.hass)
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_PENDING,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id)
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_PENDING,
                         self.hass.states.get(entity_id).state)
        future = dt_util.utcnow() + timedelta(seconds=5)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_TRIGGERED,
                         self.hass.states.get(entity_id).state)
    def test_arm_home_via_command_topic(self):
        """Test arming home via command topic."""
        assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
            alarm_control_panel.DOMAIN: {
                'platform': 'manual_mqtt',
                'name': 'test',
                'pending_time': 1,
                'state_topic': 'alarm/state',
                'command_topic': 'alarm/command',
                'payload_arm_home': 'ARM_HOME',
            }
        })
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        # Fire the arm command via MQTT; ensure state changes to pending
        fire_mqtt_message(self.hass, 'alarm/command', 'ARM_HOME')
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_PENDING,
                         self.hass.states.get(entity_id).state)
        # Fast-forward a little bit
        future = dt_util.utcnow() + timedelta(seconds=1)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_ARMED_HOME,
                         self.hass.states.get(entity_id).state)
    def test_arm_away_via_command_topic(self):
        """Test arming away via command topic."""
        assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
            alarm_control_panel.DOMAIN: {
                'platform': 'manual_mqtt',
                'name': 'test',
                'pending_time': 1,
                'state_topic': 'alarm/state',
                'command_topic': 'alarm/command',
                'payload_arm_away': 'ARM_AWAY',
            }
        })
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        # Fire the arm command via MQTT; ensure state changes to pending
        fire_mqtt_message(self.hass, 'alarm/command', 'ARM_AWAY')
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_PENDING,
                         self.hass.states.get(entity_id).state)
        # Fast-forward a little bit
        future = dt_util.utcnow() + timedelta(seconds=1)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_ARMED_AWAY,
                         self.hass.states.get(entity_id).state)
    def test_disarm_pending_via_command_topic(self):
        """Test disarming pending alarm via command topic."""
        assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
            alarm_control_panel.DOMAIN: {
                'platform': 'manual_mqtt',
                'name': 'test',
                'pending_time': 1,
                'state_topic': 'alarm/state',
                'command_topic': 'alarm/command',
                'payload_disarm': 'DISARM',
            }
        })
        entity_id = 'alarm_control_panel.test'
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
        alarm_control_panel.alarm_trigger(self.hass)
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_PENDING,
                         self.hass.states.get(entity_id).state)
        # Now that we're pending, receive a command to disarm
        fire_mqtt_message(self.hass, 'alarm/command', 'DISARM')
        self.hass.block_till_done()
        self.assertEqual(STATE_ALARM_DISARMED,
                         self.hass.states.get(entity_id).state)
    def test_state_changes_are_published_to_mqtt(self):
        """Test publishing of MQTT messages when state changes."""
        assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
            alarm_control_panel.DOMAIN: {
                'platform': 'manual_mqtt',
                'name': 'test',
                'pending_time': 1,
                'trigger_time': 1,
                'state_topic': 'alarm/state',
                'command_topic': 'alarm/command',
            }
        })
        # Component should send disarmed alarm state on startup
        self.hass.block_till_done()
        self.assertEqual(('alarm/state', STATE_ALARM_DISARMED, 0, True),
                         self.mock_publish.mock_calls[-2][1])
        # Arm in home mode
        alarm_control_panel.alarm_arm_home(self.hass)
        self.hass.block_till_done()
        self.assertEqual(('alarm/state', STATE_ALARM_PENDING, 0, True),
                         self.mock_publish.mock_calls[-2][1])
        # Fast-forward a little bit
        future = dt_util.utcnow() + timedelta(seconds=1)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()
        self.assertEqual(('alarm/state', STATE_ALARM_ARMED_HOME, 0, True),
                         self.mock_publish.mock_calls[-2][1])
        # Arm in away mode
        alarm_control_panel.alarm_arm_away(self.hass)
        self.hass.block_till_done()
        self.assertEqual(('alarm/state', STATE_ALARM_PENDING, 0, True),
                         self.mock_publish.mock_calls[-2][1])
        # Fast-forward a little bit
        future = dt_util.utcnow() + timedelta(seconds=1)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()
        self.assertEqual(('alarm/state', STATE_ALARM_ARMED_AWAY, 0, True),
                         self.mock_publish.mock_calls[-2][1])
        # Disarm
        alarm_control_panel.alarm_disarm(self.hass)
        self.hass.block_till_done()
        self.assertEqual(('alarm/state', STATE_ALARM_DISARMED, 0, True),
                         self.mock_publish.mock_calls[-2][1])
 | |
| 
	"""Flask extension of Flask-Cache.
"""
from functools import wraps
from flask import request, current_app
from flask.signals import Namespace
from werkzeug import urls
from flask_cache import Cache as CacheBase
# pylint: disable=invalid-name
signals = Namespace()
# Signals for dealing with cache invalidation after REST methods.
after_post = signals.signal('after_post', doc="""
Signal which should be sent after a POST operation.
""")
after_put = signals.signal('after_put', doc="""
Signal which should be sent after a PUT operation.
""")
after_patch = signals.signal('after_patch', doc="""
Signal which should be sent after a PATCH operation.
""")
after_delete = signals.signal('after_delete', doc="""
Signal which should be sent after a DELETE operation.
""")
# pylint: enable=invalid-name
class Cache(CacheBase):
    """Manager class for handling creating and deleting cache keys based o
    view events.
    """
    view_key_format = '{namespace}:view:{path}'
    def init_app(self, app, config=None):
        if config is None:
            config = app.config
        config.setdefault('CARAFE_CACHE_ENABLED', True)
        config.setdefault('CARAFE_CACHE_IGNORED_REQUEST_ARGS', [])
        if not config['CARAFE_CACHE_ENABLED']:  # pragma: no cover
            return
        super(Cache, self).init_app(app, config=config)
        self.connect_signals()
    def connect_signals(self):
        """Connect supported signals to handlers."""
        after_post.connect(self.on_after_post)
        after_put.connect(self.on_after_put)
        after_patch.connect(self.on_after_patch)
        after_delete.connect(self.on_after_delete)
    def get_cache_namespace(self, obj):
        """Determine object's cache namespace."""
        if getattr(obj, 'cache_namespace', None) is not None:
            return (obj.cache_namespace if not callable(obj.cache_namespace)
                    else obj.cache_namespace())
        elif hasattr(obj, '__name__'):
            return obj.__name__
        else:
            return obj.__class__.__name__
    @property
    def client(self):
        """Proxy to cache client wrapper."""
        return self.cache if self.enabled else None
    @property
    def server(self):
        """Proxy to cache server client."""
        return getattr(self.cache, '_client', None) if self.enabled else None
    @property
    def enabled(self):
        """Property access to config's CARAFE_CACHE_ENABLED."""
        return current_app.config['CARAFE_CACHE_ENABLED']
    @property
    def cache_key_prefix(self):
        return current_app.config['CACHE_KEY_PREFIX']
    def clear_keys(self, *keys):
        """Clear specified keys"""
        if not keys:  # pragma: no cover
            return
        keys = [self.cache_key_prefix + k for k in keys]
        self.server.delete(*keys)
    def clear_prefixes(self, *prefixes):
        """Clear keys starting with prefix"""
        if not prefixes:  # pragma: no cover
            return
        def search_prefix(prefix):
            return '{0}{1}*'.format(self.cache_key_prefix, prefix)
        keys = []
        for prefix in prefixes:
            keys += self.server.keys(search_prefix(prefix))
        if keys:
            self.server.delete(*keys)
    def clear(self, prefixes=None, keys=None):
        """Clear cache keys using an optional prefix, regex, and/or list of
        keys.
        """
        if not self.enabled:
            return
        if not any([prefixes, keys]) or not hasattr(self.server, 'pipeline'):
            # this is the same as clearing the entire cache
            try:
                self.cache.clear()
            except Exception as ex:  # pragma: no cover
                current_app.logger.exception(ex)
            return
        if prefixes:
            try:
                self.clear_prefixes(*prefixes)
            except Exception as ex:  # pragma: no cover
                current_app.logger.exception(ex)
        if keys:
            try:
                self.clear_keys(*keys)
            except Exception as ex:  # pragma: no cover
                current_app.logger.exception(ex)
    def cached_view(self,
                    timeout=None,
                    namespace=None,
                    unless=None,
                    include_request_args=True):
        """Wrapper around self.cached which itself is a decorator. We're
        wrapping because we want to have access to the class instance of the
        view in order to namespace the key. We can't always namespace using
        key_prefix since some cache decorators are placed around parent classes
        which don't know anything about the child class.
        """
        # pylint: disable=missing-docstring
        def wrap(func):
            @wraps(func)
            def wrapper(*args, **kargs):
                if not self.enabled:
                    return func(*args, **kargs)
                if namespace is not None:
                    # Make namespace available in case `f` is used as signal
                    # sender. Mainly used to get namespace when invalidating
                    # cache keys via namespace prefix.
                    func.cache_namespace = namespace
                # If args[0] is set, then this is a class based view, else use
                # function.
                obj = args[0] if args else func
                cache_namespace = self.get_cache_namespace(obj)
                view_path = self.create_view_path(include_request_args)
                key_prefix = self.view_key_format.format(
                    namespace=cache_namespace,
                    path=view_path,
                    **request.view_args)
                cached = self.cached(timeout=timeout,
                                     key_prefix=key_prefix,
                                     unless=unless)(func)
                try:
                    # Cache server could be down.
                    result = cached(*args, **kargs)
                except Exception as ex:  # pragma: no cover
                    # Return function call instead.
                    current_app.logger.exception(ex)
                    result = func(*args, **kargs)
                return result
            return wrapper
        return wrap
    def create_view_path(self, include_request_args=False):
        """Construct view path from request.path with option to include GET
        args.
        """
        href = urls.Href(request.path)
        if include_request_args:
            ignored = current_app.config['CARAFE_CACHE_IGNORED_REQUEST_ARGS']
            args = dict((k, v) for k, v in request.args.lists()
                        if k not in ignored)
        else:
            args = None
        return href(args)
    def on_modified_record(self, sender):
        """Common tasks to perform when a record is modified."""
        namespace = self.get_cache_namespace(sender)
        prefixes = [self.view_key_format.format(namespace=namespace, path='')]
        # Append cascade keys which should be invalidated (typically due to
        # this API's data being used in other APIs).
        prefixes += getattr(sender, 'cache_cascade', [])
        try:
            self.clear(prefixes=prefixes)
        except Exception as ex:  # pragma: no cover
            current_app.logger.exception(ex)
    def on_after_post(self, sender):
        """Handle the `after_post` event. Executed after a POST request."""
        self.on_modified_record(sender)
    def on_after_put(self, sender):
        """Handle the `after_put` event. Executed after a PUT request."""
        self.on_modified_record(sender)
    def on_after_patch(self, sender):
        """Handle the `after_patch` event. Executed after a PATCH request."""
        self.on_modified_record(sender)
    def on_after_delete(self, sender):
        """Handle the `after_delete` event. Executed after a DELETE request."""
        self.on_modified_record(sender)
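# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# simple in-memory cache backend and a hypothetical `/users` endpoint; the
# view and payloads below are made up for demonstration.
if __name__ == '__main__':  # pragma: no cover
    from flask import Flask

    app = Flask(__name__)
    app.config['CACHE_TYPE'] = 'simple'

    cache = Cache()
    cache.init_app(app)

    @app.route('/users')
    @cache.cached_view(timeout=60)
    def list_users():
        # Cached under the 'list_users' namespace derived from the view name.
        return 'alice,bob'

    @app.route('/users', methods=['POST'])
    def create_user():
        # Tell the cache a record changed so stale '/users' responses are
        # invalidated (the simple backend just clears the whole cache).
        after_post.send(list_users)
        return '', 201

    app.run(debug=True)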
 | |
| 
	import numpy
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import configuration
from chainer import function_node
from chainer.utils import argument
from chainer.utils import type_check
if cuda.cudnn_enabled:
    cudnn = cuda.cudnn
class Dropout(function_node.FunctionNode):
    """Dropout regularization."""
    _use_cudnn = False
    def __init__(self, dropout_ratio, mask=None):
        if not 0.0 <= dropout_ratio < 1.0:
            raise ValueError('dropout_ratio must be in the range [0, 1)')
        self.dropout_ratio = dropout_ratio
        self.mask = mask
    def check_type_forward(self, in_types):
        type_check.expect(in_types.size() == 1)
        type_check.expect(in_types[0].dtype.kind == 'f')
    def forward_cpu(self, x):
        if (intel64.should_use_ideep('>=auto')
                and intel64.inputs_all_ready(x)
                and self.mask is None):
            return self._forward_ideep(x)
        if self.mask is not None:
            y = x[0] * self.mask
        else:
            scale = x[0].dtype.type(1. / (1 - self.dropout_ratio))
            flag = numpy.random.rand(*x[0].shape) >= self.dropout_ratio
            self.mask = scale * flag
            y = x[0] * self.mask
        return y,
    def forward_gpu(self, x):
        if (chainer.should_use_cudnn('==always', 5000)
                and x[0].flags.c_contiguous
                and self.mask is None):
            self._use_cudnn = True
            handle = cudnn.get_handle()
            if hasattr(self, 'states'):
                # if we already have a dropout mask,
                # the forward operation is equal to backward.
                return cuda.get_cudnn_dropout_states().backward(
                    handle, x[0], self.dropout_ratio, self.states),
            self.states, y = cuda.get_cudnn_dropout_states().forward(
                handle, x[0], self.dropout_ratio)
            return y,
        else:
            if self.mask is not None:
                y = x[0] * self.mask
            else:
                rand = cuda.cupy.random.rand(*x[0].shape, dtype=numpy.float32)
                scale = x[0].dtype.type(1. / (1 - self.dropout_ratio))
                self.mask, y = cuda.elementwise(
                    'T x, R r, T scale, T ratio', 'T mask, T y',
                    '''
                    mask = (r >= ratio) * scale;
                    y = x * mask;
                    ''',
                    'dropout_fwd',
                )(x[0], rand, scale, self.dropout_ratio)
            return y,
    def _forward_ideep(self, x):
        mask, y = intel64.ideep.dropout.Forward(
            intel64.ideep.array(x[0]),
            self.dropout_ratio)
        self.mask = mask
        return y,
    def backward(self, x, gy):
        if chainer.should_use_cudnn('==always', 5000) and self._use_cudnn:
            return DropoutGradCuDNN(self.states, self.dropout_ratio).apply(gy)
        else:
            return DropoutGrad(self.mask).apply(gy)
class DropoutGrad(function_node.FunctionNode):
    """Computes the gradient of the Dropout function."""
    def __init__(self, mask):
        self.mask = mask
    def forward(self, inputs):
        if (intel64.should_use_ideep('>=auto')
                and intel64.inputs_all_ready(inputs)):
            return self._forward_ideep(inputs)
        y = inputs[0] * self.mask
        return y,
    def _forward_ideep(self, inputs):
        return intel64.ideep.dropout.Backward(
            intel64.ideep.array(self.mask),
            intel64.ideep.array(inputs[0])),
    def backward(self, indexes, gy):
        return DropoutGrad(self.mask).apply(gy)
class DropoutGradCuDNN(function_node.FunctionNode):
    """Computes the gradient of the Dropout function with cuDNN support."""
    def __init__(self, states, dropout_ratio):
        self.states = states
        self.dropout_ratio = dropout_ratio
    def forward(self, inputs):
        handle = cudnn.get_handle()
        return cuda.get_cudnn_dropout_states().backward(
            handle, inputs[0], self.dropout_ratio, self.states),
    def backward(self, indexes, gy):
        return DropoutGradCuDNN(self.states, self.dropout_ratio).apply(gy)
def dropout(x, ratio=.5, **kwargs):
    """dropout(x, ratio=.5, *, mask=None, return_mask=False)
    Drops elements of input variable randomly.
    This function drops input elements randomly with probability ``ratio`` and
    scales the remaining elements by factor ``1 / (1 - ratio)``. In testing
    mode (i.e., ``chainer.config.train`` is set to ``False``), it does nothing
    and just returns ``x``.
    .. warning::
       ``train`` argument is not supported anymore since v2.
       Instead, use ``chainer.using_config('train', boolean)``.
       See :func:`chainer.using_config`.
    Args:
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`):
            Input variable. A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
        ratio (float):
            Dropout ratio. The ``ratio`` must be ``0.0 <= ratio < 1.0``.
        mask (`ndarray` or None):
            The mask to be used for dropout.
            You do not have to specify this value, unless you need to make
            results deterministic.
            If ``mask`` is not specified or set to ``None``, a mask will be
            generated randomly according to the given ``ratio``.
            If ``mask`` is specified, ``ratio`` will be ignored.
            The shape and dtype must be the same as ``x`` and should be on the
            same device.
            Note that iDeep and cuDNN will not be used for this function if
            mask is specified, as iDeep and cuDNN do not support it.
        return_mask (bool):
            If ``True``, the mask used for dropout is returned together with
            the output variable.
            The returned mask can later be reused by passing it to ``mask``
            argument.
    Returns:
        ~chainer.Variable or tuple:
            When ``return_mask`` is ``False`` (default), returns the output
            variable.
            When ``True``, returns the tuple of the output variable and
            mask (`ndarray`). The mask will be on the same device as the input.
            The mask will become ``None`` when ``chainer.config.train`` is set
            to ``False``.
    See the paper by G. Hinton: `Improving neural networks by preventing \
    co-adaptation of feature detectors <https://arxiv.org/abs/1207.0580>`_.
    .. admonition:: Example
        >>> x = np.array([[-1, 0], [2, -3], [-2, 1]], np.float32)
        >>> with chainer.using_config('train', True):
        ...     y = F.dropout(x)
        >>> y.data
        array([[-2.,  0.],
               [ 4., -6.],
               [-0.,  2.]], dtype=float32)
        >>> with chainer.using_config('train', True):
        ...     y = F.dropout(x, ratio=0.0) \
        ... # dropout returns original input if ratio=0.0
        >>> (x == y.data).all()
        True
        >>> with chainer.using_config('train', False):
        ...     y = F.dropout(x) \
        ... # dropout in test mode returns original input
        >>> (x == y.data).all()
        True
    """
    mask = None
    return_mask = False
    if kwargs:
        mask, return_mask = argument.parse_kwargs(
            kwargs, ('mask', mask), ('return_mask', return_mask),
            train='train argument is not supported anymore. '
                  'Use chainer.using_config')
    if configuration.config.train:
        func = Dropout(ratio, mask)
        out, = func.apply((x,))
        mask = func.mask
    else:
        out = chainer.as_variable(x)
        mask = None
    if return_mask:
        return out, mask
    return out
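# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): reusing the mask
# returned by ``dropout`` to reproduce the exact same dropout pattern on a
# second forward pass. The array values are arbitrary.
if __name__ == '__main__':
    _x = numpy.arange(6, dtype=numpy.float32).reshape(3, 2)
    with chainer.using_config('train', True):
        _y1, _mask = dropout(_x, ratio=0.5, return_mask=True)
        _y2 = dropout(_x, mask=_mask)  # drops exactly the same elements as _y1
    assert (_y1.data == _y2.data).all()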
 | |
| 
	# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to create and restore from backups
using Cloud Spanner.
For more information, see the README.rst under /spanner.
"""
import argparse
from datetime import datetime, timedelta
import time
from google.cloud import spanner
# [START spanner_create_backup]
def create_backup(instance_id, database_id, backup_id, version_time):
    """Creates a backup for a database."""
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    # Create a backup
    expire_time = datetime.utcnow() + timedelta(days=14)
    backup = instance.backup(backup_id, database=database, expire_time=expire_time, version_time=version_time)
    operation = backup.create()
    # Wait for backup operation to complete.
    operation.result(2100)
    # Verify that the backup is ready.
    backup.reload()
    assert backup.is_ready() is True
    # Get the name, create time and backup size.
    backup.reload()
    print(
        "Backup {} of size {} bytes was created at {} for version of database at {}".format(
            backup.name, backup.size_bytes, backup.create_time, backup.version_time
        )
    )
# [END spanner_create_backup]
# [START spanner_create_backup_with_encryption_key]
def create_backup_with_encryption_key(instance_id, database_id, backup_id, kms_key_name):
    """Creates a backup for a database using a Customer Managed Encryption Key (CMEK)."""
    from google.cloud.spanner_admin_database_v1 import CreateBackupEncryptionConfig
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    # Create a backup
    expire_time = datetime.utcnow() + timedelta(days=14)
    encryption_config = {
        'encryption_type': CreateBackupEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
        'kms_key_name': kms_key_name,
    }
    backup = instance.backup(backup_id, database=database, expire_time=expire_time, encryption_config=encryption_config)
    operation = backup.create()
    # Wait for backup operation to complete.
    operation.result(2100)
    # Verify that the backup is ready.
    backup.reload()
    assert backup.is_ready() is True
    # Get the name, create time, backup size and encryption key.
    backup.reload()
    print(
        "Backup {} of size {} bytes was created at {} using encryption key {}".format(
            backup.name, backup.size_bytes, backup.create_time, kms_key_name
        )
    )
# [END spanner_create_backup_with_encryption_key]
# [START spanner_restore_backup]
def restore_database(instance_id, new_database_id, backup_id):
    """Restores a database from a backup."""
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    # Start restoring an existing backup to a new database.
    backup = instance.backup(backup_id)
    new_database = instance.database(new_database_id)
    operation = new_database.restore(backup)
    # Wait for restore operation to complete.
    operation.result(1600)
    # Newly created database has restore information.
    new_database.reload()
    restore_info = new_database.restore_info
    print(
        "Database {} restored to {} from backup {} with version time {}.".format(
            restore_info.backup_info.source_database,
            new_database_id,
            restore_info.backup_info.backup,
            restore_info.backup_info.version_time
        )
    )
# [END spanner_restore_backup]
# [START spanner_restore_backup_with_encryption_key]
def restore_database_with_encryption_key(instance_id, new_database_id, backup_id, kms_key_name):
    """Restores a database from a backup using a Customer Managed Encryption Key (CMEK)."""
    from google.cloud.spanner_admin_database_v1 import RestoreDatabaseEncryptionConfig
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    # Start restoring an existing backup to a new database.
    backup = instance.backup(backup_id)
    encryption_config = {
        'encryption_type': RestoreDatabaseEncryptionConfig.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION,
        'kms_key_name': kms_key_name,
    }
    new_database = instance.database(new_database_id, encryption_config=encryption_config)
    operation = new_database.restore(backup)
    # Wait for restore operation to complete.
    operation.result(1600)
    # Newly created database has restore information.
    new_database.reload()
    restore_info = new_database.restore_info
    print(
        "Database {} restored to {} from backup {} with using encryption key {}.".format(
            restore_info.backup_info.source_database,
            new_database_id,
            restore_info.backup_info.backup,
            new_database.encryption_config.kms_key_name,
        )
    )
# [END spanner_restore_backup_with_encryption_key]
# [START spanner_cancel_backup_create]
def cancel_backup(instance_id, database_id, backup_id):
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)
    expire_time = datetime.utcnow() + timedelta(days=30)
    # Create a backup.
    backup = instance.backup(backup_id, database=database, expire_time=expire_time)
    operation = backup.create()
    # Cancel backup creation.
    operation.cancel()
    # Cancel operations are best effort so either it will complete or
    # be cancelled.
    while not operation.done():
        time.sleep(300)  # 5 mins
    # Deal with resource if the operation succeeded.
    if backup.exists():
        print("Backup was created before the cancel completed.")
        backup.delete()
        print("Backup deleted.")
    else:
        print("Backup creation was successfully cancelled.")
# [END spanner_cancel_backup_create]
# [START spanner_list_backup_operations]
def list_backup_operations(instance_id, database_id):
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    # List the CreateBackup operations.
    filter_ = (
        "(metadata.@type:type.googleapis.com/"
        "google.spanner.admin.database.v1.CreateBackupMetadata) "
        "AND (metadata.database:{})"
    ).format(database_id)
    operations = instance.list_backup_operations(filter_=filter_)
    for op in operations:
        metadata = op.metadata
        print(
            "Backup {} on database {}: {}% complete.".format(
                metadata.name, metadata.database, metadata.progress.progress_percent
            )
        )
# [END spanner_list_backup_operations]
# [START spanner_list_database_operations]
def list_database_operations(instance_id):
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    # List the progress of restore.
    filter_ = (
        "(metadata.@type:type.googleapis.com/"
        "google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata)"
    )
    operations = instance.list_database_operations(filter_=filter_)
    for op in operations:
        print(
            "Database {} restored from backup is {}% optimized.".format(
                op.metadata.name, op.metadata.progress.progress_percent
            )
        )
# [END spanner_list_database_operations]
# [START spanner_list_backups]
def list_backups(instance_id, database_id, backup_id):
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    # List all backups.
    print("All backups:")
    for backup in instance.list_backups():
        print(backup.name)
    # List all backups that contain a name.
    print('All backups with backup name containing "{}":'.format(backup_id))
    for backup in instance.list_backups(filter_="name:{}".format(backup_id)):
        print(backup.name)
    # List all backups for a database that contains a name.
    print('All backups with database name containing "{}":'.format(database_id))
    for backup in instance.list_backups(filter_="database:{}".format(database_id)):
        print(backup.name)
    # List all backups that expire before a timestamp.
    expire_time = datetime.utcnow().replace(microsecond=0) + timedelta(days=30)
    print(
        'All backups with expire_time before "{}-{}-{}T{}:{}:{}Z":'.format(
            *expire_time.timetuple()
        )
    )
    for backup in instance.list_backups(
        filter_='expire_time < "{}-{}-{}T{}:{}:{}Z"'.format(*expire_time.timetuple())
    ):
        print(backup.name)
    # List all backups with a size greater than some bytes.
    print("All backups with backup size more than 100 bytes:")
    for backup in instance.list_backups(filter_="size_bytes > 100"):
        print(backup.name)
    # List backups that were created after a timestamp that are also ready.
    create_time = datetime.utcnow().replace(microsecond=0) - timedelta(days=1)
    print(
        'All backups created after "{}-{}-{}T{}:{}:{}Z" and are READY:'.format(
            *create_time.timetuple()
        )
    )
    for backup in instance.list_backups(
        filter_='create_time >= "{}-{}-{}T{}:{}:{}Z" AND state:READY'.format(
            *create_time.timetuple()
        )
    ):
        print(backup.name)
    print("All backups with pagination")
    # If there are multiple pages, additional ``ListBackup``
    # requests will be made as needed while iterating.
    for backup in instance.list_backups(page_size=2):
        print(backup.name)
# [END spanner_list_backups]
# [START spanner_delete_backup]
def delete_backup(instance_id, backup_id):
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    backup = instance.backup(backup_id)
    backup.reload()
    # Wait for databases that reference this backup to finish optimizing.
    while backup.referencing_databases:
        time.sleep(30)
        backup.reload()
    # Delete the backup.
    backup.delete()
    # Verify that the backup is deleted.
    assert backup.exists() is False
    print("Backup {} has been deleted.".format(backup.name))
# [END spanner_delete_backup]
# [START spanner_update_backup]
def update_backup(instance_id, backup_id):
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    backup = instance.backup(backup_id)
    backup.reload()
    # Expire time must be within 366 days of the create time of the backup.
    old_expire_time = backup.expire_time
    new_expire_time = old_expire_time + timedelta(days=30)
    backup.update_expire_time(new_expire_time)
    print(
        "Backup {} expire time was updated from {} to {}.".format(
            backup.name, old_expire_time, new_expire_time
        )
    )
# [END spanner_update_backup]
# [START spanner_create_database_with_version_retention_period]
def create_database_with_version_retention_period(instance_id, database_id, retention_period):
    """Creates a database with a version retention period."""
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    ddl_statements = [
        "CREATE TABLE Singers ("
        + "  SingerId   INT64 NOT NULL,"
        + "  FirstName  STRING(1024),"
        + "  LastName   STRING(1024),"
        + "  SingerInfo BYTES(MAX)"
        + ") PRIMARY KEY (SingerId)",
        "CREATE TABLE Albums ("
        + "  SingerId     INT64 NOT NULL,"
        + "  AlbumId      INT64 NOT NULL,"
        + "  AlbumTitle   STRING(MAX)"
        + ") PRIMARY KEY (SingerId, AlbumId),"
        + "  INTERLEAVE IN PARENT Singers ON DELETE CASCADE",
        "ALTER DATABASE `{}`"
        " SET OPTIONS (version_retention_period = '{}')".format(
            database_id, retention_period
        )
    ]
    db = instance.database(database_id, ddl_statements)
    operation = db.create()
    operation.result(30)
    db.reload()
    print("Database {} created with version retention period {} and earliest version time {}".format(
        db.database_id, db.version_retention_period, db.earliest_version_time
    ))
    db.drop()
# [END spanner_create_database_with_version_retention_period]
if __name__ == "__main__":  # noqa: C901
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("instance_id", help="Your Cloud Spanner instance ID.")
    parser.add_argument(
        "--database-id", help="Your Cloud Spanner database ID.", default="example_db"
    )
    parser.add_argument(
        "--backup-id", help="Your Cloud Spanner backup ID.", default="example_backup"
    )
    subparsers = parser.add_subparsers(dest="command")
    subparsers.add_parser("create_backup", help=create_backup.__doc__)
    subparsers.add_parser("cancel_backup", help=cancel_backup.__doc__)
    subparsers.add_parser("update_backup", help=update_backup.__doc__)
    subparsers.add_parser("restore_database", help=restore_database.__doc__)
    subparsers.add_parser("list_backups", help=list_backups.__doc__)
    subparsers.add_parser("list_backup_operations", help=list_backup_operations.__doc__)
    subparsers.add_parser(
        "list_database_operations", help=list_database_operations.__doc__
    )
    subparsers.add_parser("delete_backup", help=delete_backup.__doc__)
    args = parser.parse_args()
    if args.command == "create_backup":
        create_backup(args.instance_id, args.database_id, args.backup_id)
    elif args.command == "cancel_backup":
        cancel_backup(args.instance_id, args.database_id, args.backup_id)
    elif args.command == "update_backup":
        update_backup(args.instance_id, args.backup_id)
    elif args.command == "restore_database":
        restore_database(args.instance_id, args.database_id, args.backup_id)
    elif args.command == "list_backups":
        list_backups(args.instance_id, args.database_id, args.backup_id)
    elif args.command == "list_backup_operations":
        list_backup_operations(args.instance_id, args.database_id)
    elif args.command == "list_database_operations":
        list_database_operations(args.instance_id)
    elif args.command == "delete_backup":
        delete_backup(args.instance_id, args.backup_id)
    else:
        print("Command {} did not match expected commands.".format(args.command))
 | |
| 
	from __future__ import print_function, division
import numpy as np
import nibabel as nib
import pandas as pd
import sys
import os
from numpy.testing import assert_almost_equal, assert_array_equal
#uppath = lambda _path, n: os.sep.join(_path.split(os.sep)[:-n])
__file__ = os.getcwd()
#sys.path.append(uppath(__file__, 1))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),"../utils/")))
from scenes import (on_off_course, multiple_factors_course, gen_sample_by_factors,
                    get_training_samples, get_tst_samples, make_label_by_time,
                    other_scene_ids, analyze_performance)
## Create the test image data
shape_3d = (40, 40, 20)
V = np.prod(shape_3d)
T = 438
arr_2d = np.random.normal(size=(V, T))
expected_stds = np.std(arr_2d, axis=0)
arr_4d = np.reshape(arr_2d, shape_3d + (T,))
## Create the test scenes data
scenes=np.array([[  17,  255,    1,   66],
       [ 272,   39,    1,   26],
       [ 311,    6,    1,   36],
       [ 317,   26,    1,   29],
       [ 343,   42,    1,   36],
       [ 385,   49,    1,   38],
       [ 434,   14,    1,   61],
       [ 448,   52,    1,   69],
       [ 500,    7,    1,   69],
       [ 507,    9,    1,   69],
       [ 516,   40,    1,   38],
       [ 556,   30,    1,   40],
       [ 586,    8,    1,   38],
       [ 594,   75,    1,   38],
       [ 669,   33,    1,   36],
       [ 702,   10,    1,   66],
       [ 712,   40,    1,   68],
       [ 752,   26,    1,   67],
       [ 778,   13,    1,   66],
       [ 791,   56,    1,   67],
       [ 847,   18,    1,   73],
       [ 865,   20,    1,   73],
       [ 885,   14,    1,   66],
       [ 899,   80,    1,    1],
       [ 979,   12,    1,   66],
       [ 991,   18,    1,    1],
       [1009,    9,    1,   65],
       [1018,    8,    1,   36],
       [1026,   84,    1,   56],
       [1110,   13,    1,   35],
       [1123,   12,    1,   38],
       [1135,    9,    1,   37],
       [1144,   27,    1,    1],
       [1171,    1,    1,   17],
       [1172,   22,    1,    1],
       [1194,    7,    1,   66],
       [1201,   40,    1,   31],
       [1241,   51,    1,   32],
       [1292,   92,    1,   33],
       [1384,   13,    1,   33],
       [1397,  128,    1,   34],
       [1525,   20,    1,   32],
       [1545,    4,    1,   46],
       [1549,    7,    1,   87],
       [1556,   24,    1,   86],
       [1580,   19,    1,   87],
       [1599,   20,    1,   88],
       [1619,    4,    1,   70],
       [1623,    5,    1,   71],
       [1628,    5,    1,   66],
       [1633,   28,    1,   22],
       [1661,   74,    1,    7],
       [1735,    4,    1,   50],
       [1739,    5,    1,   49],
       [1744,   14,    1,    7],
       [1758,   38,    1,    9],
       [1796,   49,    1,    9],
       [1845,   11,    1,    9],
       [1856,   15,    1,    9],
       [1871,   44,    1,    9],
       [1915,  113,    1,   21],
       [2028,  122,    1,   51],
       [2150,  158,    1,   81],
       [2308,    2,    1,   62],
       [2310,    1,    1,    2],
       [2311,    1,    1,   89],
       [2312,    3,    1,   90],
       [2315,   44,    1,   81],
       [2359,   16,    1,   82],
       [2375,   41,    1,   83],
       [2416,   72,    1,   79],
       [2488,   11,    1,   82],
       [2499,    5,    1,   80],
       [2504,    4,    1,   80],
       [2508,    3,    1,   80],
       [2511,    5,    1,   80],
       [2516,   66,    1,   78],
       [2582,   18,    1,   78],
       [2600,   11,    1,   35],
       [2611,    7,    1,   78],
       [2618,  441,    1,   80],
       [3059,   34,    1,   66],
       [3093,   78,    1,   52],
       [3171,   80,    1,   52],
       [3251,   16,    1,   52],
       [3267,   26,    1,   52],
       [3293,   31,    1,    8],
       [3324,   10,    1,    8],
       [3334,  244,    1,   84],
       [3578,    9,    1,   84],
       [3587,  113,    1,   14],
       [3700,   52,    1,   84],
       [3752,    4,    1,   30],
       [3756,    7,    1,   19],
       [3763,   14,    1,   45],
       [3777,  134,    1,   84],
       [3911,   27,    1,   76],
       [3938,    3,    1,   66],
       [3941,   17,    1,   60],
       [3958,    4,    1,   66],
       [3962,   58,    1,   75],
       [4020,   58,    1,   57],
       [4078,   33,    1,   54],
       [4111,  102,    1,   23],
       [4213,   96,    1,   55],
       [4309,   33,    1,    6],
       [4342,   32,    1,   55],
       [4374,    4,    1,   86],
       [4378,   13,    1,   66],
       [4391,   17,    1,   86],
       [4408,   23,    1,   85],
       [4431,   56,    1,   43],
       [4487,   57,    1,   38],
       [4544,   19,    1,   11],
       [4563,   25,    1,   16],
       [4588,   16,    1,   10],
       [4604,   24,    1,   16],
       [4628,   30,    1,   13],
       [4658,   31,    1,   10],
       [4689,   19,    1,   25],
       [4708,    5,    1,   10],
       [4713,   24,    1,    5],
       [4737,    5,    1,    4],
       [4742,   58,    1,    3],
       [4800,   18,    1,   13],
       [4818,   98,    1,   10],
       [4916,   57,    1,   13],
       [4973,   16,    1,   20],
       [4989,   25,    1,   13],
       [5014,   50,    1,   13],
       [5064,    2,    1,   38],
       [5066,    7,    1,   10],
       [5073,   16,    1,   38],
       [5089,   10,    1,   13],
       [5099,   67,    1,   66],
       [5166,   41,    1,   13],
       [5207,   30,    1,   66],
       [5237,    5,    1,   31],
       [5242,   11,    1,   42],
       [5253,    4,    1,   66],
       [5257,    5,    1,   20],
       [5262,   10,    1,   20],
       [5272,    6,    1,   12],
       [5278,   12,    1,   11],
       [5290,    1,    1,   66],
       [5291,    7,    1,   28],
       [5298,    8,    1,   31],
       [5306,   42,    1,   39],
       [5348,   81,    1,   38],
       [5429,   16,    1,   38],
       [5445,   18,    1,   27],
       [5463,   10,    1,   73],
       [5473,    8,    1,   38],
       [5481,    7,    1,   38],
       [5488,   19,    1,   38],
       [5507,   13,    1,   63],
       [5520,   84,    1,   38],
       [5604,    4,    1,   38],
       [5608,   54,    1,   38],
       [5662,   26,    1,   38],
       [5688,   48,    1,   38],
       [5736,   50,    1,   41],
       [5786,    4,    1,    8],
       [5790,   14,    1,   64],
       [5804,    6,    1,   66],
       [5810,   31,    1,   59],
       [5841,   12,    1,   66],
       [5853,   47,    1,   44],
       [5900,    8,    1,    8],
       [5908,   14,    1,   18],
       [5922,   16,    1,   15],
       [5938,    2,    1,   66],
       [5940,    5,    1,   72],
       [5945,    5,    1,   66],
       [5950,   23,    1,   72],
       [5973,   42,    1,   66],
       [6015,   30,    1,   74],
       [6045,   89,    1,   24],
       [6134,   19,    1,   38],
       [6153,   47,    1,   66],
       [6200,  218,    1,   47],
       [6418,   55,    1,   58],
       [6473,  140,    1,   38],
       [6613,   45,    1,   38],
       [6658,   17,    1,   77],
       [6675,    9,    1,   13],
       [6684,   11,    1,   53],
       [6695,   13,    1,   24],
       [6708,   30,    1,   38],
       [6738,   15,    1,   48],
       [6753,    5,    1,   56],
       [6758,   19,    1,   48],
       [6777,    9,    1,   38],
       [6786,   13,    1,   48],
       [6799,    7,    1,   38],
       [6806,    7,    1,   63],
       [6813,  131,    1,   48],
       [6944,  142,    1,   68]])
       
#sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),"../../data")))
#scenes = pd.read_csv('scene_times_nums.csv', header = None) 
#scenes = scenes.values
TR = 2
NUM_VOLUMES = arr_4d.shape[-1] 
ONSET_TIMES = scenes[:,0] 
ONSET_TIMES_NORMED = ONSET_TIMES - 17 #First recorded scene occurs at t = 17 sec 
DURATION = scenes[:,1] 
LABELS = scenes[:,3]
SCAN_TIMES = np.arange(start=0, stop=2*NUM_VOLUMES, step=2)
#Creates a list that tells us scene id at given scan time 
factor_grid = []
for scan_time in SCAN_TIMES:
    index_list = np.where(ONSET_TIMES_NORMED < scan_time)[0]
    if scan_time == 0:
        label_index = 0
    else:
        label_index = index_list[-1] 
    factor_id = LABELS[label_index]
    factor_grid.append(factor_id)
    
factor_grid = np.array(factor_grid) #Convert to np array for future analysis
ALL_IDS = list(range(1, 91))
#############################################################################
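# The tests below exercise the helpers imported from scenes.py. Judging from
# the expected values used here: on_off_course returns a 0/1 indicator time
# course for the given scene ids, multiple_factors_course keeps the scene id
# at scans where one of the given ids is on screen (0 elsewhere),
# gen_sample_by_factors splits the matching scan indices for each id into
# train/test halves and returns ids with no matching scans (here 25)
# separately, and analyze_performance returns the fraction of predicted
# labels that match the actual labels (here 2/6).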
     
def test_on_off_course():
    f1 = on_off_course([26],factor_grid)
    r1 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
       0])
    assert_almost_equal(f1,r1)
def test_multiple_factors_course():
    f2 = multiple_factors_course([66], factor_grid)
    r2 =np.array([66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
       66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
       66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
       66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
       66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
       66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
       66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66,
       66, 66, 66, 66, 66, 66, 66, 66, 66,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0, 66, 66, 66, 66, 66,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0, 66, 66, 66, 66, 66, 66, 66,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
        0,  0,  0,  0,  0,  0,  0,  0,  0,  0, 66, 66, 66])
    assert_almost_equal(f2,r2)
test_GUMP_SCENES_IDS = [26, 36, 25, 38]
samp_gump, miss_gump = gen_sample_by_factors(test_GUMP_SCENES_IDS, factor_grid, False)
 
def test_gen_sample_by_factors():
    test_GUMP_SCENES_IDS = [26, 36, 25, 38]
    g1=gen_sample_by_factors(test_GUMP_SCENES_IDS, factor_grid, False)
    f3=list(g1[0].values())[0]
    r3=(np.array([128, 129, 130, 131, 132, 133, 134, 135, 136, 137]),
        np.array([138, 139, 140, 141, 142, 143, 144, 145, 146, 147]))
    f4=g1[1]
    r4=[25]
    assert_almost_equal(f3,r3)
    assert_almost_equal(f4,r4)
def test_get_training_samples():
    r5 = np.array([128, 129, 130, 131, 132, 133, 134, 135, 136, 137])
    f5= list(get_training_samples(samp_gump).values())[0]
    assert_almost_equal(f5,r5)
    
def test_get_tst_samples():
    f7 =list(get_tst_samples(samp_gump).values())[1]
    r7 =np.array([181, 182, 183, 184, 327, 328, 329, 330, 331, 332, 333, 334, 335,
       336, 337, 338, 339, 340, 341, 342])
    assert_almost_equal(f7,r7)
    
def test_make_label_by_time():
    g2= make_label_by_time(get_training_samples(samp_gump))
    r2 =(np.array([26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 36, 36, 36, 36, 36, 36, 36,
        36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 38, 38, 38, 38,
        38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
        38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38,
        38, 38, 38, 38, 38]),
 np.array([128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 148, 149, 150,
        164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
        177, 178, 179, 180, 185, 186, 187, 188, 189, 190, 191, 192, 193,
        194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
        207, 208, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260,
        261, 262, 263, 264, 265, 266, 267, 268]))
    assert_almost_equal(g2,r2)
    
def test_other_scene_ids():
    test_GUMP_SCENES_IDS = [26, 36, 25, 38]
    f6= other_scene_ids(test_GUMP_SCENES_IDS)
    similar = []
    for tup in test_GUMP_SCENES_IDS:
        if tup in f6:
            similar.append(tup)
    r6 =[]
    assert_almost_equal(similar,r6)
    
def test_analyze_performance():
    predicted_labels=np.array([26,27,28,78,66,39])
    actual_labels=np.array([26,38,39,78,39,29])
    f8 =analyze_performance(predicted_labels, actual_labels)
    r8 = 0.33333333333333337
    assert_almost_equal(f8,r8)
	#
# Copyright (c) 2010-2011, Nick Blundell
# All rights reserved.
# 
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of Nick Blundell nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
# 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# 
#
#
# Author: Nick Blundell <blundeln [AT] gmail [DOT] com>
# Organisation: www.nickblundell.org.uk
# 
# Description:
#   Defines a Rollbackable class and util functions.
#
import copy
from debug import *
from exceptions import *
class Rollbackable(object) :
  """
  A class that can have its state rolled back, to undo modifications.
  A blanket deepcopy is not ideal, though we can explore more efficient
  solutions later (e.g. copy-before-modify).
  """
  # XXX: Do we always need to copy on get AND set? Have to be careful that the original state is not modified.
  # XXX: Basically need to make sure that original state cannot be modified
  # XXX: Perhaps add copy-flag
  def _get_state(self, copy_state=True) :
    """
    Gets the state of this object that is required for rollback.  This is a
    catch-all function for classes that specialise Rollbackable, though where
    possible more efficient functions should be implemented.
    Usually we will wish to obtain a copy, so the original state is not
    modified, though sometimes (e.g. when comparing states) we will not require
    a copy.
    """
    if copy_state :
      return copy.deepcopy(self.__dict__)
    else :
      return self.__dict__
      
  def _set_state(self, state, copy_state=True) :
    """
    Sets the state of this object for rollback.  This is a
    catch-all function for classes that specialise Rollbackable, though where
    possible more efficient functions should be implemented.
    Usually we will wish to set a copy, so the original state is not
    modified, though sometimes we will not require
    a copy (e.g. if we know the original state will no longer be required).
    """
    if copy_state :
      self.__dict__ = copy.deepcopy(state)
    else :
      self.__dict__ = state
  def __eq__(self, other):
    """So we can easily compare if two objects have state of equal value."""
    # TODO: To use this is expensive and should be replaced by a more
    # efficient method
    # TODO:   perhaps a dirty-flag scheme???
    return self.__class__ == other.__class__ and self.__dict__ == other.__dict__
  @staticmethod
  def TESTS():
    class SomeClass(Rollbackable):
      def __init__(self, x, y) :
        self.x, self.y = x, y
    o = SomeClass(1, [3,4])
    state1 = o._get_state()
    o.x = 3
    o.y.append(16)
    assert(o.x == 3)
    assert(o.y == [3,4,16])
    o._set_state(state1)
    assert(o.x == 1)
    assert(o.y == [3,4])
    # Test value comparison.
    o1 = SomeClass(1, [3,4])
    o2 = SomeClass(1, [3,4])
    assert(o1 == o2)
    o2.y[1] = 9
    assert(o1 != o2)
    o2.y[1] = 4
    assert(o1 == o2)
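# Illustrative sketch (not part of the original module): the docstrings above
# note that a blanket deepcopy is not ideal and that subclasses may provide
# more efficient _get_state/_set_state implementations. Assuming a class whose
# only mutable state is a single list of immutable items, a cheaper
# specialisation could look like this.
class _ExampleCheapRollbackable(Rollbackable) :
  def __init__(self) :
    self.items = []
  def _get_state(self, copy_state=True) :
    # A shallow copy suffices when the elements themselves are immutable.
    return list(self.items) if copy_state else self.items
  def _set_state(self, state, copy_state=True) :
    self.items = list(state) if copy_state else state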
#
# Utility functions for getting and setting the state of multiple rollbackables.
#
def get_rollbackables_state(*rollbackables, **kargs) :
  """Handy function to get the state of multiple rollbackables, conviently ignoring those with value None."""
  # Assume we copy state, unless directed otherwise.
  if "copy_state" in kargs and kargs["copy_state"] == False:
    copy_state = False
  else :
    copy_state = True
  # Note: rollbackables must be in same order for get and set.
  rollbackables_state = []
  for rollbackable in rollbackables :
    if isinstance(rollbackable, Rollbackable) :
      rollbackables_state.append(rollbackable._get_state(copy_state=copy_state))
  
  #if IN_DEBUG_MODE :
  #  d("Getting state : %s" % rollbackables_state)
  
  return rollbackables_state
def set_rollbackables_state(new_rollbackables_state, *rollbackables, **kargs) :
  """Handy function to set the state of multiple rollbackables, conviently ignoring those with value None."""
  # Assume we copy state, unless directed otherwise.
  if "copy_state" in kargs and kargs["copy_state"] == False:
    copy_state = False
  else :
    copy_state = True
  #if IN_DEBUG_MODE :
  #  d("Setting state to: %s" % new_rollbackables_state)
  state_index = 0
  for rollbackable in rollbackables:
    if isinstance(rollbackable, Rollbackable) :
      rollbackable._set_state(new_rollbackables_state[state_index], copy_state=copy_state)
      state_index += 1
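# Typical manual use of the two helpers above (a sketch with illustrative
# names; the automatic_rollback context manager below wraps this pattern):
#
#   saved = get_rollbackables_state(reader, token_collection)
#   try :
#     ...            # speculative work that may modify the rollbackables
#   except RollbackException :
#     set_rollbackables_state(saved, reader, token_collection)
#     raise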
class automatic_rollback:
  """
  Allows rollback of reader state using the 'with' statement, for cleaner
  syntax.
  Possible extensions:
  """
  
  def __init__(self, *rollbackables, **kargs) :
    # Store the rollbackables. Note that, for convenience, entries may be None (non-Rollbackable entries are simply ignored by the get/set helpers).
    self.some_state_changed = False
    self.check_for_state_change = "check_for_state_change" in kargs and kargs["check_for_state_change"] or None
    # Allows initial state to be reused.
    self.initial_state = "initial_state" in kargs and kargs["initial_state"] or None
    self.rollbackables = rollbackables
  
  def __enter__(self) :
    # Store the start state of each reader, unless we have been passed some
    # initial state to reuse.
    if self.initial_state :
      self.start_state = self.initial_state
    else :
      self.start_state = get_rollbackables_state(*self.rollbackables)
  
  def __exit__(self, type, value, traceback) :
    # If a RollbackException is thrown, revert all of the rollbackables.
    if type and issubclass(type, RollbackException) :
      set_rollbackables_state(self.start_state, *self.rollbackables)
      d("Rolled back rollbackables to: %s." % str(self.rollbackables))
   
    # XXX: Optimise this to first check for concrete reader.
    if self.check_for_state_change :
      # Not changing this state, so no need to copy it.
      current_state = get_rollbackables_state(*self.rollbackables, copy_state=False)
      #d("State: start: %s current: %s" % (self.start_state, current_state))
      self.some_state_changed = current_state != self.start_state
    # Note, by not returning True, we do not suppress the exception, which gives
    # us maximum flexibility.
  @staticmethod
  def TESTS() :
    class SomeClass(Rollbackable):
      def __init__(self, x, y) :
        self.x, self.y = x, y
    o_1 = SomeClass(1, [3,4])
    o_2 = None                # Important that we can handle None to simplify code.
    o_3 = SomeClass(1, [3,4])
   
    try :
      with automatic_rollback(o_1, o_2, o_3):
        o_1.x = 3
        o_3.y.append(16)
        assert(o_1.x == 3)
        assert(o_3.y == [3,4,16])
        raise LensException() # In practice we will usually use LensException
    except LensException:
      pass # Don't wish to stop test run.
       
    # Check we rolled back.
    assert(o_1.x == 1)
    assert(o_3.y == [3,4])
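    # Sketch of the check_for_state_change flag, which the test above does not
    # cover (names are illustrative): repeat a step until it no longer changes
    # any rollbackable state.
    #
    #   ar = automatic_rollback(reader, check_for_state_change=True)
    #   while True :
    #     try :
    #       with ar :
    #         parse_one_item(reader)
    #     except LensException :
    #       pass
    #     if not ar.some_state_changed :
    #       break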
	'''namedtuple defs which represent the data structures defined in RFC 6962 -
Certificate Transparency.
'''
import struct
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1_modules import rfc5280
from utlz import flo, namedtuple as namedtuple_utlz
from ctutlz.utils.tdf_bytes import TdfBytesParser, namedtuple
from ctutlz.utils.encoding import decode_from_b64, encode_to_b64
from ctutlz.utils.string import to_hex
from ctutlz.sct.ee_cert import tbscert_without_ct_extensions
# tdf := "TLS Data Format" (cf. https://tools.ietf.org/html/rfc5246#section-4)
# 3.1. Log Entries
# https://tools.ietf.org/html/rfc6962#section-3.1
def _parse_log_entry_type(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.read('val', '!H')  # (65535) -> 2 bytes
        return parser.result()
LogEntryType = namedtuple(
    typename='LogEntryType',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_log_entry_type,
        'val': lambda self: self._parse['val'],
        'is_x509_entry': lambda self: self.val == 0,
        'is_precert_entry': lambda self: self.val == 1,
        '__str__': lambda self: lambda:
            'x509_entry' if self.is_x509_entry else
            'precert_entry' if self.is_precert_entry else
            flo('<unknown log entry type {self.tdf}>'),
    }
)
def _parse_log_entry(tdf):
    with TdfBytesParser(tdf) as parser:
        entry_type = LogEntryType(
            parser.delegate('entry_type', _parse_log_entry_type))
        # parse entry
        if entry_type.is_x509_entry:
            parser.delegate('entry', _parse_x509_chain_entry)
            parser.res['x509_entry'] = parser.res['entry']
        elif entry_type.is_precert_entry:
            parser.delegate('entry', _parse_precert_chain_entry)
            parser.res['precert_entry'] = parser.res['entry']
        else:
            raise Exception(flo('Unknown entry_type: {entry_type}'))
        return parser.result()
LogEntry = namedtuple(
    typename='LogEntry',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_log_entry,
        'entry_type': lambda self: LogEntryType(self._parse['entry_type']),
        'entry': lambda self:
            ASN1Cert(self._parse['entry'])
                if self.entry_type.is_x509_entry else
            PreCert(self._parse['entry'])
                if self.entry_type.is_precert_entry else
            None,
    }
)
def _parse_asn1_cert(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.read('len1', '!B')
        parser.read('len2', '!B')
        parser.read('len3', '!B')
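        # The three bytes just read are a 24-bit big-endian length prefix
        # (opaque ASN.1Cert<1..2^24-1> in RFC 6962), e.g. len1=0x01, len2=0x00,
        # len3=0x00 means a 65536-byte DER blob follows.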
        der_len = struct.unpack('!I', struct.pack('!4B',
                                                  0,
                                                  parser.res['len1'],
                                                  parser.res['len2'],
                                                  parser.res['len3']))[0]
        parser.res['der_len'] = der_len
        parser.read('der', flo('!{der_len}s'))
        return parser.result()
ASN1Cert = namedtuple(
    typename='ASN1Cert',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_asn1_cert,
        'der': lambda self: self._parse['der'],
        'pyasn1': lambda self: der_decoder(self.der, rfc5280.Certificate()),
    }
)
def _parse_asn1_cert_list(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.read('len1', '!B')
        parser.read('len2', '!B')
        parser.read('len3', '!B')
        der_list_len = struct.unpack('!I', struct.pack('!4B',
                                                       0,
                                                       parser.res['len1'],
                                                       parser.res['len2'],
                                                       parser.res['len3']))[0]
        der_end_offset = parser.offset + der_list_len
        list_of_parse_asn1_cert = []
        while parser.offset < der_end_offset:
            parse_asn1_cert = parser.delegate(_parse_asn1_cert)
            list_of_parse_asn1_cert.append(parse_asn1_cert)
        parser.res['der_list_len'] = der_list_len
        parser.res['list_of_parse_asn1_cert'] = list_of_parse_asn1_cert
        return parser.result()
ASN1CertList = namedtuple(
    typename='ASN1CertList',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_asn1_cert_list,
        'certs': lambda self: [
            ASN1Cert(parse_asn1_cert)
            for parse_asn1_cert
            in self._parse['list_of_parse_asn1_cert']
        ],
    }
)
def _parse_x509_chain_entry(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.delegate('leaf_certificate', _parse_asn1_cert)
        parser.delegate('certificate_chain', _parse_asn1_cert_list)
        return parser.result()
X509ChainEntry = namedtuple(
    typename='X509ChainEntry',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_x509_chain_entry,
        'leaf_certificate': lambda self:
            ASN1Cert(self._parse['leaf_certificate']),
        'certificate_chain': lambda self:
            ASN1CertList(self._parse['certificate_chain']),
    }
)
def _parse_precert_chain_entry(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.delegate('pre_certificate', _parse_asn1_cert)
        parser.delegate('precert_chain', _parse_asn1_cert_list)
        return parser.result()
PrecertChainEntry = namedtuple(
    typename='PrecertChainEntry',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_precert_chain_entry,
        'pre_certificate': lambda self:
            ASN1Cert(self._parse['pre_certificate']),
        'precertificate_chain': lambda self:
            ASN1CertList(self._parse['precert_chain']),
    }
)
# 3.2 Structure of the Signed Certificate Timestamp
# https://tools.ietf.org/html/rfc6962#section-3.2
def _parse_signature_type(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.read('val', '!B')
        return parser.result()
SignatureType = namedtuple(
    typename='SignatureType',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_signature_type,
        'val': lambda self: self._parse['val'],
        'is_certificate_timestamp': lambda self: self.val == 0,
        'is_tree_hash': lambda self: self.val == 1,
        '__str__': lambda self: lambda:
            'certificate_timestamp' if self.is_certificate_timestamp else
            'tree_hash' if self.is_tree_hash else
            '<unknown signature type>',
    }
)
def _parse_version(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.read('val', '!B')
        return parser.result()
Version = namedtuple(
    typename='Version',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_version,
        'val': lambda self: int(self._parse['val']),
        'is_v1': lambda self: self.val == 0,
        '__str__': lambda self: lambda:
            'v1' if self.is_v1 else
            '<unknown version>',
    }
)
def _parse_log_id(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.read('val', '!32s')
        return parser.result()
LogID = namedtuple(
    typename='LogID',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_log_id,
        # type: int, '!L', [32]
        # https://docs.python.org/3/library/struct.html#format-characters
        'val': lambda self: bytes(self._parse['val']),
    },
)
def _parse_tbs_certificate(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.read('len1', '!B')
        parser.read('len2', '!B')
        parser.read('len3', '!B')
        len_der = struct.unpack('!I', struct.pack('!4B',
                                                  0,
                                                  parser.res['len1'],
                                                  parser.res['len2'],
                                                  parser.res['len3']))[0]
        from_ = parser.offset
        parser.offset += len_der
        until = parser.offset
        parser.res['der'] = tdf[from_:until]
        return parser.result()
TBSCertificate = namedtuple(
    typename='TBSCertificate',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_tbs_certificate,
        'der': lambda self: bytes(self._parse['der']),
        'pyasn1': lambda self: der_decoder(self.der,
                                           asn1Spec=rfc5280.TBSCertificate()),
        'len': lambda self: len(self.der),
        'lens': lambda self: struct.unpack('!4B', struct.pack('!I', self.len)),
        'len1': lambda self: self.lens[1],
        'len2': lambda self: self.lens[2],
        'len3': lambda self: self.lens[3],
        'without_ct_extensions': lambda self:
            der_encoder(
                TBSCertificate(tbscert_without_ct_extensions(self.pyasn1))),
    }
)
def _parse_pre_cert(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.read('issuer_key_hash', '!32s')
        parser.delegate('tbs_certificate', _parse_tbs_certificate)
        return parser.result()
PreCert = namedtuple(
    typename='PreCert',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_pre_cert,
        'issuer_key_hash': lambda self: bytes(self._parse['issuer_key_hash']),
        'tbs_certificate': lambda self:
            TBSCertificate(self._parse['tbs_certificate']),
    }
)
def _parse_ct_extensions(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.read('len', '!H')
        parser.res['val'] = None  # "Currently, no extensions are specified"
        return parser.result()
CtExtensions = namedtuple(
    typename='CtExtensions',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_ct_extensions,
        'len': lambda self: self._parse['len'],
        'val': lambda self: self._parse['val'],
    }
)
def _parse_signed_certificate_timestamp(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.delegate('version', _parse_version)
        parser.delegate('id', _parse_log_id)
        parser.read('timestamp', '!Q')
        parser.delegate('ct_extensions', _parse_ct_extensions)
        # digitally-signed struct
        parser.read('signature_alg_hash', '!B')
        parser.read('signature_alg_sign', '!B')
        signature_len = parser.read('signature_len', '!H')
        parser.read('signature', flo('!{signature_len}s'))
        return parser.result()
SignedCertificateTimestamp = namedtuple(
    typename='SignedCertificateTimestamp',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_signed_certificate_timestamp,
        'version': lambda self: Version(self._parse['version']),
        'id': lambda self: LogID(self._parse['id']),
        'timestamp': lambda self: int(self._parse['timestamp']),
        'extensions': lambda self: CtExtensions(self._parse['ct_extensions']),
        # digitally-signed struct
        # https://tools.ietf.org/html/rfc5246#section-4.7
        'signature_algorithm_hash': lambda self:
            int(self._parse['signature_alg_hash']),
        'signature_algorithm_signature': lambda self:
            int(self._parse['signature_alg_sign']),
        'signature_len': lambda self: int(self._parse['signature_len']),
        'signature': lambda self: bytes(self._parse['signature']),
        'log_id': lambda self: self.id,
        'log_id_b64': lambda self: encode_to_b64(self.log_id.tdf),  # type: str
        'version_hex': lambda self: to_hex(self.version.tdf),
        'timestamp_hex': lambda self: to_hex(self.timestamp),
        'extensions_len': lambda self: self.extensions.len,
        'extensions_len_hex': lambda self: to_hex(self.extensions_len),
        'signature_alg_hash_hex': lambda self:
            to_hex(self.signature_algorithm_hash),
        'signature_alg_sign_hex': lambda self:
            to_hex(self.signature_algorithm_signature),
        'signature_b64': lambda self: encode_to_b64(self.signature),  # str
        'b64': lambda self: encode_to_b64(self.tdf)  # str
    }
)
def _parse_signature_input(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.delegate('sct_version', _parse_version)
        parser.delegate('signature_type', _parse_signature_type)
        # rest of the SignatureInput is identical to a TimestampedEntry
        parser.delegate('_tmp', _parse_timestamped_entry)
        parser.res.update(parser.res['_tmp'])
        del parser.res['_tmp']
        return parser.result()
# 'digitally-signed struct' of the SignedCertificateTimestamp
SignatureInput = namedtuple(
    typename='SignatureInput',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_signature_input,
        'sct_version': lambda self: Version(self._parse['sct_version']),
        'signature_type': lambda self:
            SignatureType(self._parse['signature_type']),
        'timestamp': lambda self: int(self._parse['timestamp']),
        'entry_type': lambda self: LogEntryType(self._parse['entry_type']),
        'signed_entry': lambda self:
            ASN1Cert(self._parse['signed_entry'])
                if self.entry_type.is_x509_entry else
            PreCert(self._parse['signed_entry'])
                if self.entry_type.is_precert_entry else
            None,
        'precert_entry': lambda self: self._parse.get('precert_entry', None),
        'x509_entry': lambda self: self._parse.get('x509_entry', None),
    }
)
# 3.4 Merkle Tree
# https://tools.ietf.org/html/rfc6962#section-3.4
def _parse_merkle_leaf_type(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.read('val', '!B')  # (255)
        return parser.result()
MerkleLeafType = namedtuple(
    typename='MerkleLeafType',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_merkle_leaf_type,
        'val': lambda self: int(self._parse['val']),
        'is_timestamped_entry': lambda self: self.val == 0,
        '__str__': lambda self: lambda:
            'timestamped_entry' if self.is_timestamped_entry else
            '<unknown merkle leaf type>',
    }
)
def _parse_timestamped_entry(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.read('timestamp', '!Q')  # uint64  -> 8 bytes
        entry_type = LogEntryType(
            parser.delegate('entry_type', _parse_log_entry_type))
        # parse leaf_entry
        if entry_type.is_x509_entry:
            parser.delegate('signed_entry', _parse_asn1_cert)
            parser.res['x509_entry'] = parser.res['signed_entry']
        elif entry_type.is_precert_entry:
            parser.delegate('signed_entry', _parse_pre_cert)
            parser.res['precert_entry'] = parser.res['signed_entry']
        else:
            raise Exception(flo('Unknown entry_type number: {entry_type}'))
        # TODO DEBUG ctlog_get_entries.py related (it looks like some log
        #                                          answers are missing
        #                                          the ct_extensions,
        #                                         or an error in parse routines)
        try:
            parser.delegate('extensions', _parse_ct_extensions)
        except struct.error:
            pass
        return parser.result()
TimestampedEntry = namedtuple(
    typename='TimestampedEntry',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_timestamped_entry,
        'timestamp': lambda self: int(self._parse.get('timestamp')),
        'entry_type': lambda self: LogEntryType(self._parse['entry_type']),
        'signed_entry': lambda self:
            ASN1Cert(self._parse['signed_entry'])
                if self.entry_type.is_x509_entry else
            PreCert(self._parse['signed_entry'])
                if self.entry_type.is_precert_entry else
            None,
        'extensions': lambda self: CtExtensions(self._parse.get('extensions')),
        'precert_entry': lambda self: self._parse.get('precert_entry', None),
        'x509_entry': lambda self: self._parse.get('x509_entry', None),
    }
)
def _parse_merkle_tree_leaf(tdf):
    with TdfBytesParser(tdf) as parser:
        parser.delegate('version', _parse_version)
        leaf_type = parser.delegate('leaf_type', _parse_merkle_leaf_type)
        if MerkleLeafType(leaf_type).is_timestamped_entry:
            parser.delegate('leaf_entry', _parse_timestamped_entry)
        else:
            raise Exception(flo('unknown leaf_type: {leaf_type}!'))
        return parser.result()
MerkleTreeLeaf = namedtuple(
    typename='MerkleTreeLeaf',
    field_names='arg',
    lazy_vals={
        '_parse_func': lambda _: _parse_merkle_tree_leaf,
        'version': lambda self: Version(self._parse['version']),
        'leaf_type': lambda self: MerkleLeafType(self._parse['leaf_type']),
        'leaf_entry': lambda self: TimestampedEntry(self._parse['leaf_entry']),
        # alias for 'leaf_entry'
        'timestamped_entry': lambda self: self.leaf_entry,
        '__str__': lambda self: lambda:
            self.__repr__(),
    }
)
# 4.6. Retrieve Entries from Log
# https://tools.ietf.org/html/rfc6962#section-4.6
GetEntriesInput = namedtuple_utlz(
    typename='GetEntriesInput',
    field_names=[
        'start',
        'end',
    ],
)
GetEntriesResponseEntry = namedtuple_utlz(
    typename='GetEntriesResponseEntry',
    field_names=[
        'json_dict',
    ],
    lazy_vals={
        # The base-64 encoded MerkleTreeLeaf structure
        'leaf_input_b64': lambda self: self.json_dict['leaf_input'],
        'leaf_input_tdf': lambda self: decode_from_b64(self.leaf_input_b64),
        'leaf_input': lambda self: MerkleTreeLeaf(self.leaf_input_tdf),
        'is_x509_chain_entry': lambda self:
            self.leaf_input.timestamped_entry.entry_type.is_x509_entry,
        'is_precert_chain_entry': lambda self: not self.is_x509_chain_entry,
        # The base-64 encoded unsigned data pertaining to the log entry.  In the
        # case of an X509ChainEntry, this is the "certificate_chain".  In the
        # case of a PrecertChainEntry, this is the whole "PrecertChainEntry"
        'extra_data_b64': lambda self: self.json_dict['extra_data'],
        'extra_data_tdf': lambda self: decode_from_b64(self.extra_data_b64),
        'extra_data': lambda self:
            X509ChainEntry(self.extra_data_tdf) if self.is_x509_chain_entry else
            PrecertChainEntry(self.extra_data_tdf),
        # '__str__': lambda self: '<GetEntriesResponseEntry>',
    }
)
GetEntriesResponse = namedtuple_utlz(
    typename='GetEntriesResponse',
    field_names=[
        'json_dict',
    ],
    lazy_vals={
        'entries': lambda self: [GetEntriesResponseEntry(entry)
                                 for entry
                                 in self.json_dict['entries']],
        # for convenience
        'first_entry': lambda self: self.entries[0],
    },
)
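# Minimal usage sketch (not part of the original module): fetch one entry from
# a log's get-entries endpoint (RFC 6962, section 4.6) and parse it with the
# structures above. The log URL is a placeholder and the 'requests' dependency
# is assumed only for this example.
if __name__ == '__main__':
    import requests
    url = 'https://ct.example.net/ct/v1/get-entries'  # placeholder log URL
    json_dict = requests.get(url, params={'start': 0, 'end': 0}).json()
    response = GetEntriesResponse(json_dict)
    for entry in response.entries:
        leaf = entry.leaf_input  # a MerkleTreeLeaf
        print(leaf.timestamped_entry.timestamp,
              'x509' if entry.is_x509_chain_entry else 'precert')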
	#! /usr/bin/env python
"""Test the arraymodule.
   Roger E. Masse
"""
import unittest
from test import support
from weakref import proxy
import array, io, math
from pickle import loads, dumps, HIGHEST_PROTOCOL
import operator
class ArraySubclass(array.array):
    pass
class ArraySubclassWithKwargs(array.array):
    def __init__(self, typecode, newarg=None):
        array.array.__init__(typecode)
tests = [] # list to accumulate all tests
typecodes = "ubBhHiIlLfd"
class BadConstructorTest(unittest.TestCase):
    def test_constructor(self):
        self.assertRaises(TypeError, array.array)
        self.assertRaises(TypeError, array.array, spam=42)
        self.assertRaises(TypeError, array.array, 'xx')
        self.assertRaises(ValueError, array.array, 'x')
tests.append(BadConstructorTest)
class BaseTest(unittest.TestCase):
    # Required class attributes (provided by subclasses):
    # typecode: the typecode to test
    # example: an initializer usable in the constructor for this type
    # smallerexample: the same length as example, but smaller
    # biggerexample: the same length as example, but bigger
    # outside: An entry that is not in example
    # minitemsize: the minimum guaranteed itemsize
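    # For example, a hypothetical subclass for signed bytes might define:
    #   typecode = 'b'
    #   example = [-1, 0, 1, 42]
    #   smallerexample = [-1, 0, 1, 41]
    #   biggerexample = [-1, 0, 1, 43]
    #   outside = 100
    #   minitemsize = 1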
    def assertEntryEqual(self, entry1, entry2):
        self.assertEqual(entry1, entry2)
    def badtypecode(self):
        # Return a typecode that is different from our own
        return typecodes[(typecodes.index(self.typecode)+1) % len(typecodes)]
    def test_constructor(self):
        a = array.array(self.typecode)
        self.assertEqual(a.typecode, self.typecode)
        self.assert_(a.itemsize>=self.minitemsize)
        self.assertRaises(TypeError, array.array, self.typecode, None)
    def test_len(self):
        a = array.array(self.typecode)
        a.append(self.example[0])
        self.assertEqual(len(a), 1)
        a = array.array(self.typecode, self.example)
        self.assertEqual(len(a), len(self.example))
    def test_buffer_info(self):
        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.buffer_info, 42)
        bi = a.buffer_info()
        self.assert_(isinstance(bi, tuple))
        self.assertEqual(len(bi), 2)
        self.assert_(isinstance(bi[0], int))
        self.assert_(isinstance(bi[1], int))
        self.assertEqual(bi[1], len(a))
    def test_byteswap(self):
        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.byteswap, 42)
        if a.itemsize in (1, 2, 4, 8):
            b = array.array(self.typecode, self.example)
            b.byteswap()
            if a.itemsize==1:
                self.assertEqual(a, b)
            else:
                self.assertNotEqual(a, b)
            b.byteswap()
            self.assertEqual(a, b)
    def test_copy(self):
        import copy
        a = array.array(self.typecode, self.example)
        b = copy.copy(a)
        self.assertNotEqual(id(a), id(b))
        self.assertEqual(a, b)
    def test_deepcopy(self):
        import copy
        a = array.array(self.typecode, self.example)
        b = copy.deepcopy(a)
        self.assertNotEqual(id(a), id(b))
        self.assertEqual(a, b)
    def test_pickle(self):
        for protocol in range(HIGHEST_PROTOCOL + 1):
            a = array.array(self.typecode, self.example)
            b = loads(dumps(a, protocol))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            a = ArraySubclass(self.typecode, self.example)
            a.x = 10
            b = loads(dumps(a, protocol))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(type(a), type(b))
    def test_pickle_for_empty_array(self):
        for protocol in range(HIGHEST_PROTOCOL + 1):
            a = array.array(self.typecode)
            b = loads(dumps(a, protocol))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            a = ArraySubclass(self.typecode)
            a.x = 10
            b = loads(dumps(a, protocol))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(type(a), type(b))
    def test_insert(self):
        a = array.array(self.typecode, self.example)
        a.insert(0, self.example[0])
        self.assertEqual(len(a), 1+len(self.example))
        self.assertEqual(a[0], a[1])
        self.assertRaises(TypeError, a.insert)
        self.assertRaises(TypeError, a.insert, None)
        self.assertRaises(TypeError, a.insert, 0, None)
        a = array.array(self.typecode, self.example)
        a.insert(-1, self.example[0])
        self.assertEqual(
            a,
            array.array(
                self.typecode,
                self.example[:-1] + self.example[:1] + self.example[-1:]
            )
        )
        a = array.array(self.typecode, self.example)
        a.insert(-1000, self.example[0])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:1] + self.example)
        )
        a = array.array(self.typecode, self.example)
        a.insert(1000, self.example[0])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example[:1])
        )
    def test_tofromfile(self):
        a = array.array(self.typecode, 2*self.example)
        self.assertRaises(TypeError, a.tofile)
        support.unlink(support.TESTFN)
        f = open(support.TESTFN, 'wb')
        try:
            a.tofile(f)
            f.close()
            b = array.array(self.typecode)
            f = open(support.TESTFN, 'rb')
            self.assertRaises(TypeError, b.fromfile)
            b.fromfile(f, len(self.example))
            self.assertEqual(b, array.array(self.typecode, self.example))
            self.assertNotEqual(a, b)
            self.assertRaises(EOFError, b.fromfile, f, len(self.example)+1)
            self.assertEqual(a, b)
            f.close()
        finally:
            if not f.closed:
                f.close()
            support.unlink(support.TESTFN)
    def test_tofromlist(self):
        a = array.array(self.typecode, 2*self.example)
        b = array.array(self.typecode)
        self.assertRaises(TypeError, a.tolist, 42)
        self.assertRaises(TypeError, b.fromlist)
        self.assertRaises(TypeError, b.fromlist, 42)
        self.assertRaises(TypeError, b.fromlist, [None])
        b.fromlist(a.tolist())
        self.assertEqual(a, b)
    def test_tofromstring(self):
        a = array.array(self.typecode, 2*self.example)
        b = array.array(self.typecode)
        self.assertRaises(TypeError, a.tostring, 42)
        self.assertRaises(TypeError, b.fromstring)
        self.assertRaises(TypeError, b.fromstring, 42)
        b.fromstring(a.tostring())
        self.assertEqual(a, b)
        if a.itemsize>1:
            self.assertRaises(ValueError, b.fromstring, "x")
    def test_repr(self):
        a = array.array(self.typecode, 2*self.example)
        self.assertEqual(a, eval(repr(a), {"array": array.array}))
        a = array.array(self.typecode)
        self.assertEqual(repr(a), "array('%s')" % self.typecode)
    def test_str(self):
        a = array.array(self.typecode, 2*self.example)
        str(a)
    def test_cmp(self):
        a = array.array(self.typecode, self.example)
        self.assert_((a == 42) is False)
        self.assert_((a != 42) is True)
        self.assert_((a == a) is True)
        self.assert_((a != a) is False)
        self.assert_((a < a) is False)
        self.assert_((a <= a) is True)
        self.assert_((a > a) is False)
        self.assert_((a >= a) is True)
        al = array.array(self.typecode, self.smallerexample)
        ab = array.array(self.typecode, self.biggerexample)
        self.assert_((a == 2*a) is False)
        self.assert_((a != 2*a) is True)
        self.assert_((a < 2*a) is True)
        self.assert_((a <= 2*a) is True)
        self.assert_((a > 2*a) is False)
        self.assert_((a >= 2*a) is False)
        self.assert_((a == al) is False)
        self.assert_((a != al) is True)
        self.assert_((a < al) is False)
        self.assert_((a <= al) is False)
        self.assert_((a > al) is True)
        self.assert_((a >= al) is True)
        self.assert_((a == ab) is False)
        self.assert_((a != ab) is True)
        self.assert_((a < ab) is True)
        self.assert_((a <= ab) is True)
        self.assert_((a > ab) is False)
        self.assert_((a >= ab) is False)
    def test_add(self):
        a = array.array(self.typecode, self.example) \
            + array.array(self.typecode, self.example[::-1])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example[::-1])
        )
        b = array.array(self.badtypecode())
        self.assertRaises(TypeError, a.__add__, b)
        self.assertRaises(TypeError, a.__add__, "bad")
    def test_iadd(self):
        a = array.array(self.typecode, self.example[::-1])
        b = a
        a += array.array(self.typecode, 2*self.example)
        self.assert_(a is b)
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[::-1]+2*self.example)
        )
        b = array.array(self.badtypecode())
        self.assertRaises(TypeError, a.__add__, b)
        self.assertRaises(TypeError, a.__iadd__, "bad")
    def test_mul(self):
        a = 5*array.array(self.typecode, self.example)
        self.assertEqual(
            a,
            array.array(self.typecode, 5*self.example)
        )
        a = array.array(self.typecode, self.example)*5
        self.assertEqual(
            a,
            array.array(self.typecode, self.example*5)
        )
        a = 0*array.array(self.typecode, self.example)
        self.assertEqual(
            a,
            array.array(self.typecode)
        )
        a = (-1)*array.array(self.typecode, self.example)
        self.assertEqual(
            a,
            array.array(self.typecode)
        )
        self.assertRaises(TypeError, a.__mul__, "bad")
    def test_imul(self):
        a = array.array(self.typecode, self.example)
        b = a
        a *= 5
        self.assert_(a is b)
        self.assertEqual(
            a,
            array.array(self.typecode, 5*self.example)
        )
        a *= 0
        self.assert_(a is b)
        self.assertEqual(a, array.array(self.typecode))
        a *= 1000
        self.assert_(a is b)
        self.assertEqual(a, array.array(self.typecode))
        a *= -1
        self.assert_(a is b)
        self.assertEqual(a, array.array(self.typecode))
        a = array.array(self.typecode, self.example)
        a *= -1
        self.assertEqual(a, array.array(self.typecode))
        self.assertRaises(TypeError, a.__imul__, "bad")
    def test_getitem(self):
        a = array.array(self.typecode, self.example)
        self.assertEntryEqual(a[0], self.example[0])
        self.assertEntryEqual(a[0], self.example[0])
        self.assertEntryEqual(a[-1], self.example[-1])
        self.assertEntryEqual(a[-1], self.example[-1])
        self.assertEntryEqual(a[len(self.example)-1], self.example[-1])
        self.assertEntryEqual(a[-len(self.example)], self.example[0])
        self.assertRaises(TypeError, a.__getitem__)
        self.assertRaises(IndexError, a.__getitem__, len(self.example))
        self.assertRaises(IndexError, a.__getitem__, -len(self.example)-1)
    def test_setitem(self):
        a = array.array(self.typecode, self.example)
        a[0] = a[-1]
        self.assertEntryEqual(a[0], a[-1])
        a = array.array(self.typecode, self.example)
        a[0] = a[-1]
        self.assertEntryEqual(a[0], a[-1])
        a = array.array(self.typecode, self.example)
        a[-1] = a[0]
        self.assertEntryEqual(a[0], a[-1])
        a = array.array(self.typecode, self.example)
        a[-1] = a[0]
        self.assertEntryEqual(a[0], a[-1])
        a = array.array(self.typecode, self.example)
        a[len(self.example)-1] = a[0]
        self.assertEntryEqual(a[0], a[-1])
        a = array.array(self.typecode, self.example)
        a[-len(self.example)] = a[-1]
        self.assertEntryEqual(a[0], a[-1])
        self.assertRaises(TypeError, a.__setitem__)
        self.assertRaises(TypeError, a.__setitem__, None)
        self.assertRaises(TypeError, a.__setitem__, 0, None)
        self.assertRaises(
            IndexError,
            a.__setitem__,
            len(self.example), self.example[0]
        )
        self.assertRaises(
            IndexError,
            a.__setitem__,
            -len(self.example)-1, self.example[0]
        )
    def test_delitem(self):
        a = array.array(self.typecode, self.example)
        del a[0]
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[1:])
        )
        a = array.array(self.typecode, self.example)
        del a[-1]
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:-1])
        )
        a = array.array(self.typecode, self.example)
        del a[len(self.example)-1]
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:-1])
        )
        a = array.array(self.typecode, self.example)
        del a[-len(self.example)]
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[1:])
        )
        self.assertRaises(TypeError, a.__delitem__)
        self.assertRaises(TypeError, a.__delitem__, None)
        self.assertRaises(IndexError, a.__delitem__, len(self.example))
        self.assertRaises(IndexError, a.__delitem__, -len(self.example)-1)
    def test_getslice(self):
        a = array.array(self.typecode, self.example)
        self.assertEqual(a[:], a)
        self.assertEqual(
            a[1:],
            array.array(self.typecode, self.example[1:])
        )
        self.assertEqual(
            a[:1],
            array.array(self.typecode, self.example[:1])
        )
        self.assertEqual(
            a[:-1],
            array.array(self.typecode, self.example[:-1])
        )
        self.assertEqual(
            a[-1:],
            array.array(self.typecode, self.example[-1:])
        )
        self.assertEqual(
            a[-1:-1],
            array.array(self.typecode)
        )
        self.assertEqual(
            a[2:1],
            array.array(self.typecode)
        )
        self.assertEqual(
            a[1000:],
            array.array(self.typecode)
        )
        self.assertEqual(a[-1000:], a)
        self.assertEqual(a[:1000], a)
        self.assertEqual(
            a[:-1000],
            array.array(self.typecode)
        )
        self.assertEqual(a[-1000:1000], a)
        self.assertEqual(
            a[2000:1000],
            array.array(self.typecode)
        )
    def test_extended_getslice(self):
        # Test extended slicing by comparing with list slicing
        # (Assumes list conversion works correctly, too)
        a = array.array(self.typecode, self.example)
        indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
        for start in indices:
            for stop in indices:
                # Everything except the initial 0 (invalid step)
                for step in indices[1:]:
                    self.assertEqual(list(a[start:stop:step]),
                                     list(a)[start:stop:step])
    def test_setslice(self):
        a = array.array(self.typecode, self.example)
        a[:1] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example[1:])
        )
        a = array.array(self.typecode, self.example)
        a[:-1] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example + self.example[-1:])
        )
        a = array.array(self.typecode, self.example)
        a[-1:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:-1] + self.example)
        )
        a = array.array(self.typecode, self.example)
        a[1:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:1] + self.example)
        )
        a = array.array(self.typecode, self.example)
        a[1:-1] = a
        self.assertEqual(
            a,
            array.array(
                self.typecode,
                self.example[:1] + self.example + self.example[-1:]
            )
        )
        a = array.array(self.typecode, self.example)
        a[1000:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, 2*self.example)
        )
        a = array.array(self.typecode, self.example)
        a[-1000:] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example)
        )
        a = array.array(self.typecode, self.example)
        a[:1000] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example)
        )
        a = array.array(self.typecode, self.example)
        a[:-1000] = a
        self.assertEqual(
            a,
            array.array(self.typecode, 2*self.example)
        )
        a = array.array(self.typecode, self.example)
        a[1:0] = a
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[:1] + self.example + self.example[1:])
        )
        a = array.array(self.typecode, self.example)
        a[2000:1000] = a
        self.assertEqual(
            a,
            array.array(self.typecode, 2*self.example)
        )
        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 0), None)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 1), None)
        b = array.array(self.badtypecode())
        self.assertRaises(TypeError, a.__setitem__, slice(0, 0), b)
        self.assertRaises(TypeError, a.__setitem__, slice(0, 1), b)
    def test_extended_set_del_slice(self):
        indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
        for start in indices:
            for stop in indices:
                # Everything except the initial 0 (invalid step)
                for step in indices[1:]:
                    a = array.array(self.typecode, self.example)
                    L = list(a)
                    # Make sure we have a slice of exactly the right length,
                    # but with (hopefully) different data.
                    data = L[start:stop:step]
                    data.reverse()
                    L[start:stop:step] = data
                    a[start:stop:step] = array.array(self.typecode, data)
                    self.assertEquals(a, array.array(self.typecode, L))
                    del L[start:stop:step]
                    del a[start:stop:step]
                    self.assertEquals(a, array.array(self.typecode, L))
    def test_index(self):
        example = 2*self.example
        a = array.array(self.typecode, example)
        self.assertRaises(TypeError, a.index)
        for x in example:
            self.assertEqual(a.index(x), example.index(x))
        self.assertRaises(ValueError, a.index, None)
        self.assertRaises(ValueError, a.index, self.outside)
    def test_count(self):
        example = 2*self.example
        a = array.array(self.typecode, example)
        self.assertRaises(TypeError, a.count)
        for x in example:
            self.assertEqual(a.count(x), example.count(x))
        self.assertEqual(a.count(self.outside), 0)
        self.assertEqual(a.count(None), 0)
    def test_remove(self):
        for x in self.example:
            example = 2*self.example
            a = array.array(self.typecode, example)
            pos = example.index(x)
            example2 = example[:pos] + example[pos+1:]
            a.remove(x)
            self.assertEqual(a, array.array(self.typecode, example2))
        a = array.array(self.typecode, self.example)
        self.assertRaises(ValueError, a.remove, self.outside)
        self.assertRaises(ValueError, a.remove, None)
    def test_pop(self):
        a = array.array(self.typecode)
        self.assertRaises(IndexError, a.pop)
        a = array.array(self.typecode, 2*self.example)
        self.assertRaises(TypeError, a.pop, 42, 42)
        self.assertRaises(TypeError, a.pop, None)
        self.assertRaises(IndexError, a.pop, len(a))
        self.assertRaises(IndexError, a.pop, -len(a)-1)
        self.assertEntryEqual(a.pop(0), self.example[0])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[1:]+self.example)
        )
        self.assertEntryEqual(a.pop(1), self.example[2])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[1:2]+self.example[3:]+self.example)
        )
        self.assertEntryEqual(a.pop(0), self.example[1])
        self.assertEntryEqual(a.pop(), self.example[-1])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[3:]+self.example[:-1])
        )
    def test_reverse(self):
        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.reverse, 42)
        a.reverse()
        self.assertEqual(
            a,
            array.array(self.typecode, self.example[::-1])
        )
    def test_extend(self):
        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.extend)
        a.extend(array.array(self.typecode, self.example[::-1]))
        self.assertEqual(
            a,
            array.array(self.typecode, self.example+self.example[::-1])
        )
        b = array.array(self.badtypecode())
        self.assertRaises(TypeError, a.extend, b)
        a = array.array(self.typecode, self.example)
        a.extend(self.example[::-1])
        self.assertEqual(
            a,
            array.array(self.typecode, self.example+self.example[::-1])
        )
    def test_constructor_with_iterable_argument(self):
        a = array.array(self.typecode, iter(self.example))
        b = array.array(self.typecode, self.example)
        self.assertEqual(a, b)
        # non-iterable argument
        self.assertRaises(TypeError, array.array, self.typecode, 10)
        # pass through errors raised in __iter__
        class A:
            def __iter__(self):
                raise UnicodeError
        self.assertRaises(UnicodeError, array.array, self.typecode, A())
        # pass through errors raised in next()
        def B():
            raise UnicodeError
            yield None
        self.assertRaises(UnicodeError, array.array, self.typecode, B())
    def test_coveritertraverse(self):
        try:
            import gc
        except ImportError:
            return
        a = array.array(self.typecode)
        l = [iter(a)]
        l.append(l)
        gc.collect()
    def test_buffer(self):
        a = array.array(self.typecode, self.example)
        m = memoryview(a)
        b = bytes(m)
        self.assertEqual(b, a.tostring())
        self.assertEqual(b[0], a.tostring()[0])
        # Resizing is forbidden when there are buffer exports
        self.assertRaises(BufferError, a.append, a[0])
        self.assertRaises(BufferError, a.extend, a[0:1])
        self.assertRaises(BufferError, a.remove, a[0])
        self.assertRaises(BufferError, a.fromlist, a.tolist())
        self.assertRaises(BufferError, a.fromstring, a.tostring())
        if self.typecode == 'u':
            self.assertRaises(BufferError, a.fromunicode, a.tounicode())
        self.assertRaises(BufferError, operator.setitem, a, slice(0, 0), a)
        self.assertRaises(BufferError, operator.delitem, a, 0)
        self.assertRaises(BufferError, operator.delitem, a, slice(0, 1))
        self.assertRaises(BufferError, operator.imul, a, 2)
        self.assertRaises(BufferError, operator.imul, a, 0)
    def test_weakref(self):
        s = array.array(self.typecode, self.example)
        p = proxy(s)
        self.assertEqual(p.tostring(), s.tostring())
        s = None
        self.assertRaises(ReferenceError, len, p)
    def test_bug_782369(self):
        import sys
        if hasattr(sys, "getrefcount"):
            for i in range(10):
                b = array.array('B', range(64))
            rc = sys.getrefcount(10)
            for i in range(10):
                b = array.array('B', range(64))
            self.assertEqual(rc, sys.getrefcount(10))
    def test_subclass_with_kwargs(self):
        # SF bug #1486663 -- this used to erroneously raise a TypeError
        ArraySubclassWithKwargs('b', newarg=1)
    def test_create_from_bytes(self):
        a = array.array('H', b"1234")
        self.assertEqual(len(a) * a.itemsize, 4)
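# Hedged, illustrative sketch (not part of the original test suite): the
# buffer test above relies on CPython forbidding any resize of an array while
# a memoryview export is outstanding.  The helper below shows that rule on a
# plain 'i' array; it is defined here for illustration and never called.
def _demo_buffer_export_blocks_resize():
    arr = array.array('i', [1, 2, 3])
    view = memoryview(arr)            # outstanding buffer export
    try:
        arr.append(4)                 # any resize must raise while exported
        raised = False
    except BufferError:
        raised = True
    return raised                     # -> True while 'view' is alive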
class StringTest(BaseTest):
    def test_setitem(self):
        super().test_setitem()
        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.__setitem__, 0, self.example[:2])
class UnicodeTest(StringTest):
    typecode = 'u'
    example = '\x01\u263a\x00\ufeff'
    smallerexample = '\x01\u263a\x00\ufefe'
    biggerexample = '\x01\u263a\x01\ufeff'
    outside = str('\x33')
    minitemsize = 2
    def test_unicode(self):
        self.assertRaises(TypeError, array.array, 'b', 'foo')
        a = array.array('u', '\xa0\xc2\u1234')
        a.fromunicode(' ')
        a.fromunicode('')
        a.fromunicode('')
        a.fromunicode('\x11abc\xff\u1234')
        s = a.tounicode()
        self.assertEqual(s, '\xa0\xc2\u1234 \x11abc\xff\u1234')
        s = '\x00="\'a\\b\x80\xff\u0000\u0001\u1234'
        a = array.array('u', s)
        self.assertEqual(
            repr(a),
            "array('u', '\\x00=\"\\'a\\\\b\\x80\xff\\x00\\x01\u1234')")
        self.assertRaises(TypeError, a.fromunicode)
tests.append(UnicodeTest)
class NumberTest(BaseTest):
    def test_extslice(self):
        a = array.array(self.typecode, range(5))
        self.assertEqual(a[::], a)
        self.assertEqual(a[::2], array.array(self.typecode, [0,2,4]))
        self.assertEqual(a[1::2], array.array(self.typecode, [1,3]))
        self.assertEqual(a[::-1], array.array(self.typecode, [4,3,2,1,0]))
        self.assertEqual(a[::-2], array.array(self.typecode, [4,2,0]))
        self.assertEqual(a[3::-2], array.array(self.typecode, [3,1]))
        self.assertEqual(a[-100:100:], a)
        self.assertEqual(a[100:-100:-1], a[::-1])
        self.assertEqual(a[-100:100:2], array.array(self.typecode, [0,2,4]))
        self.assertEqual(a[1000:2000:2], array.array(self.typecode, []))
        self.assertEqual(a[-1000:-2000:-2], array.array(self.typecode, []))
    def test_delslice(self):
        a = array.array(self.typecode, range(5))
        del a[::2]
        self.assertEqual(a, array.array(self.typecode, [1,3]))
        a = array.array(self.typecode, range(5))
        del a[1::2]
        self.assertEqual(a, array.array(self.typecode, [0,2,4]))
        a = array.array(self.typecode, range(5))
        del a[1::-2]
        self.assertEqual(a, array.array(self.typecode, [0,2,3,4]))
        a = array.array(self.typecode, range(10))
        del a[::1000]
        self.assertEqual(a, array.array(self.typecode, [1,2,3,4,5,6,7,8,9]))
    def test_assignment(self):
        a = array.array(self.typecode, range(10))
        a[::2] = array.array(self.typecode, [42]*5)
        self.assertEqual(a, array.array(self.typecode, [42, 1, 42, 3, 42, 5, 42, 7, 42, 9]))
        a = array.array(self.typecode, range(10))
        a[::-4] = array.array(self.typecode, [10]*3)
        self.assertEqual(a, array.array(self.typecode, [0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
        a = array.array(self.typecode, range(4))
        a[::-1] = a
        self.assertEqual(a, array.array(self.typecode, [3, 2, 1, 0]))
        a = array.array(self.typecode, range(10))
        b = a[:]
        c = a[:]
        ins = array.array(self.typecode, range(2))
        a[2:3] = ins
        b[slice(2,3)] = ins
        c[2:3:] = ins
    def test_iterationcontains(self):
        a = array.array(self.typecode, range(10))
        self.assertEqual(list(a), list(range(10)))
        b = array.array(self.typecode, [20])
        self.assertEqual(a[-1] in a, True)
        self.assertEqual(b[0] not in a, True)
    def check_overflow(self, lower, upper):
        # method to be used by subclasses
        # should not overflow assigning lower limit
        a = array.array(self.typecode, [lower])
        a[0] = lower
        # should overflow assigning less than lower limit
        self.assertRaises(OverflowError, array.array, self.typecode, [lower-1])
        self.assertRaises(OverflowError, a.__setitem__, 0, lower-1)
        # should not overflow assigning upper limit
        a = array.array(self.typecode, [upper])
        a[0] = upper
        # should overflow assigning more than upper limit
        self.assertRaises(OverflowError, array.array, self.typecode, [upper+1])
        self.assertRaises(OverflowError, a.__setitem__, 0, upper+1)
    def test_subclassing(self):
        typecode = self.typecode
        class ExaggeratingArray(array.array):
            __slots__ = ['offset']
            def __new__(cls, typecode, data, offset):
                return array.array.__new__(cls, typecode, data)
            def __init__(self, typecode, data, offset):
                self.offset = offset
            def __getitem__(self, i):
                return array.array.__getitem__(self, i) + self.offset
        a = ExaggeratingArray(self.typecode, [3, 6, 7, 11], 4)
        self.assertEntryEqual(a[0], 7)
        self.assertRaises(AttributeError, setattr, a, "color", "blue")
class SignedNumberTest(NumberTest):
    example = [-1, 0, 1, 42, 0x7f]
    smallerexample = [-1, 0, 1, 42, 0x7e]
    biggerexample = [-1, 0, 1, 43, 0x7f]
    outside = 23
    def test_overflow(self):
        a = array.array(self.typecode)
        lower = -1 * int(pow(2, a.itemsize * 8 - 1))
        upper = int(pow(2, a.itemsize * 8 - 1)) - 1
        self.check_overflow(lower, upper)
class UnsignedNumberTest(NumberTest):
    example = [0, 1, 17, 23, 42, 0xff]
    smallerexample = [0, 1, 17, 23, 42, 0xfe]
    biggerexample = [0, 1, 17, 23, 43, 0xff]
    outside = 0xaa
    def test_overflow(self):
        a = array.array(self.typecode)
        lower = 0
        upper = int(pow(2, a.itemsize * 8)) - 1
        self.check_overflow(lower, upper)
class ByteTest(SignedNumberTest):
    typecode = 'b'
    minitemsize = 1
tests.append(ByteTest)
class UnsignedByteTest(UnsignedNumberTest):
    typecode = 'B'
    minitemsize = 1
tests.append(UnsignedByteTest)
class ShortTest(SignedNumberTest):
    typecode = 'h'
    minitemsize = 2
tests.append(ShortTest)
class UnsignedShortTest(UnsignedNumberTest):
    typecode = 'H'
    minitemsize = 2
tests.append(UnsignedShortTest)
class IntTest(SignedNumberTest):
    typecode = 'i'
    minitemsize = 2
tests.append(IntTest)
class UnsignedIntTest(UnsignedNumberTest):
    typecode = 'I'
    minitemsize = 2
tests.append(UnsignedIntTest)
class LongTest(SignedNumberTest):
    typecode = 'l'
    minitemsize = 4
tests.append(LongTest)
class UnsignedLongTest(UnsignedNumberTest):
    typecode = 'L'
    minitemsize = 4
tests.append(UnsignedLongTest)
class FPTest(NumberTest):
    example = [-42.0, 0, 42, 1e5, -1e10]
    smallerexample = [-42.0, 0, 42, 1e5, -2e10]
    biggerexample = [-42.0, 0, 42, 1e5, 1e10]
    outside = 23
    def assertEntryEqual(self, entry1, entry2):
        self.assertAlmostEqual(entry1, entry2)
    def test_byteswap(self):
        a = array.array(self.typecode, self.example)
        self.assertRaises(TypeError, a.byteswap, 42)
        if a.itemsize in (1, 2, 4, 8):
            b = array.array(self.typecode, self.example)
            b.byteswap()
            if a.itemsize==1:
                self.assertEqual(a, b)
            else:
                # On alphas treating the byte swapped bit patters as
                # floats/doubles results in floating point exceptions
                # => compare the 8bit string values instead
                self.assertNotEqual(a.tostring(), b.tostring())
            b.byteswap()
            self.assertEqual(a, b)
class FloatTest(FPTest):
    typecode = 'f'
    minitemsize = 4
tests.append(FloatTest)
class DoubleTest(FPTest):
    typecode = 'd'
    minitemsize = 8
    def test_alloc_overflow(self):
        from sys import maxsize
        a = array.array('d', [-1]*65536)
        try:
            a *= maxsize//65536 + 1
        except MemoryError:
            pass
        else:
            self.fail("Array of size > maxsize created - MemoryError expected")
        b = array.array('d', [ 2.71828183, 3.14159265, -1])
        try:
            b * (maxsize//3 + 1)
        except MemoryError:
            pass
        else:
            self.fail("Array of size > maxsize created - MemoryError expected")
tests.append(DoubleTest)
def test_main(verbose=None):
    import sys
    support.run_unittest(*tests)
    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in range(len(counts)):
            support.run_unittest(*tests)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print(counts)
if __name__ == "__main__":
    test_main(verbose=True)
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import islice, cycle, groupby, repeat
import logging
from random import randint
from threading import Lock
import six
from cassandra import ConsistencyLevel
from six.moves import range
log = logging.getLogger(__name__)
class HostDistance(object):
    """
    A measure of how "distant" a node is from the client, which
    may influence how the load balancer distributes requests
    and how many connections are opened to the node.
    """
    IGNORED = -1
    """
    A node with this distance should never be queried or have
    connections opened to it.
    """
    LOCAL = 0
    """
    Nodes with ``LOCAL`` distance will be preferred for operations
    under some load balancing policies (such as :class:`.DCAwareRoundRobinPolicy`)
    and will have a greater number of connections opened against
    them by default.
    This distance is typically used for nodes within the same
    datacenter as the client.
    """
    REMOTE = 1
    """
    Nodes with ``REMOTE`` distance will be treated as a last resort
    by some load balancing policies (such as :class:`.DCAwareRoundRobinPolicy`)
    and will have a smaller number of connections opened against
    them by default.
    This distance is typically used for nodes outside of the
    datacenter that the client is running in.
    """
class HostStateListener(object):
    def on_up(self, host):
        """ Called when a node is marked up. """
        raise NotImplementedError()
    def on_down(self, host):
        """ Called when a node is marked down. """
        raise NotImplementedError()
    def on_add(self, host):
        """
        Called when a node is added to the cluster.  The newly added node
        should be considered up.
        """
        raise NotImplementedError()
    def on_remove(self, host):
        """ Called when a node is removed from the cluster. """
        raise NotImplementedError()
class LoadBalancingPolicy(HostStateListener):
    """
    Load balancing policies are used to decide how to distribute
    requests among all possible coordinator nodes in the cluster.
    In particular, they may focus on querying "near" nodes (those
    in a local datacenter) or on querying nodes that happen to
    be replicas for the requested data.
    You may also use subclasses of :class:`.LoadBalancingPolicy` for
    custom behavior.
    """
    _hosts_lock = None
    def __init__(self):
        self._hosts_lock = Lock()
    def distance(self, host):
        """
        Returns a measure of how remote a :class:`~.pool.Host` is in
        terms of the :class:`.HostDistance` enums.
        """
        raise NotImplementedError()
    def populate(self, cluster, hosts):
        """
        This method is called to initialize the load balancing
        policy with a set of :class:`.Host` instances before its
        first use.  The `cluster` parameter is an instance of
        :class:`.Cluster`.
        """
        raise NotImplementedError()
    def make_query_plan(self, working_keyspace=None, query=None):
        """
        Given a :class:`~.query.Statement` instance, return an iterable
        of :class:`.Host` instances which should be queried in that
        order.  A generator may work well for custom implementations
        of this method.
        Note that the `query` argument may be :const:`None` when preparing
        statements.
        `working_keyspace` should be the string name of the current keyspace,
        as set through :meth:`.Session.set_keyspace()` or with a ``USE``
        statement.
        """
        raise NotImplementedError()
    def check_supported(self):
        """
        This will be called after the cluster Metadata has been initialized.
        If the load balancing policy implementation cannot be supported for
        some reason (such as a missing C extension), this is the point at
        which it should raise an exception.
        """
        pass
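# Hedged, illustrative sketch (not part of the driver): a minimal custom
# LoadBalancingPolicy showing the hooks a subclass typically implements.  It
# treats every host as LOCAL and yields hosts in arbitrary set order; a real
# policy would order hosts more deliberately.
class _ExampleEverythingLocalPolicy(LoadBalancingPolicy):
    def populate(self, cluster, hosts):
        self._hosts = set(hosts)
    def distance(self, host):
        return HostDistance.LOCAL
    def make_query_plan(self, working_keyspace=None, query=None):
        # snapshot under the lock so concurrent host-state callbacks are safe
        with self._hosts_lock:
            hosts = list(self._hosts)
        for host in hosts:
            yield host
    def on_up(self, host):
        with self._hosts_lock:
            self._hosts.add(host)
    def on_down(self, host):
        with self._hosts_lock:
            self._hosts.discard(host)
    on_add = on_up
    on_remove = on_down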
class RoundRobinPolicy(LoadBalancingPolicy):
    """
    A subclass of :class:`.LoadBalancingPolicy` which evenly
    distributes queries across all nodes in the cluster,
    regardless of what datacenter the nodes may be in.
    This load balancing policy is used by default.
    """
    _live_hosts = frozenset(())
    def populate(self, cluster, hosts):
        self._live_hosts = frozenset(hosts)
        if len(hosts) <= 1:
            self._position = 0
        else:
            self._position = randint(0, len(hosts) - 1)
    def distance(self, host):
        return HostDistance.LOCAL
    def make_query_plan(self, working_keyspace=None, query=None):
        # not thread-safe, but we don't care much about lost increments
        # for the purposes of load balancing
        pos = self._position
        self._position += 1
        hosts = self._live_hosts
        length = len(hosts)
        if length:
            pos %= length
            return list(islice(cycle(hosts), pos, pos + length))
        else:
            return []
    def on_up(self, host):
        with self._hosts_lock:
            self._live_hosts = self._live_hosts.union((host, ))
    def on_down(self, host):
        with self._hosts_lock:
            self._live_hosts = self._live_hosts.difference((host, ))
    def on_add(self, host):
        with self._hosts_lock:
            self._live_hosts = self._live_hosts.union((host, ))
    def on_remove(self, host):
        with self._hosts_lock:
            self._live_hosts = self._live_hosts.difference((host, ))
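# Hedged, illustrative sketch: make_query_plan above rotates the live-host
# tuple by an ever-increasing position so successive plans start at different
# hosts.  The helper below isolates the islice/cycle rotation with
# hypothetical host names; it is never called by the driver.
def _example_round_robin_rotation(hosts=('h1', 'h2', 'h3'), pos=4):
    pos %= len(hosts)
    # e.g. pos=4 over three hosts gives ['h2', 'h3', 'h1']
    return list(islice(cycle(hosts), pos, pos + len(hosts)))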
class DCAwareRoundRobinPolicy(LoadBalancingPolicy):
    """
    Similar to :class:`.RoundRobinPolicy`, but prefers hosts
    in the local datacenter and only uses nodes in remote
    datacenters as a last resort.
    """
    local_dc = None
    used_hosts_per_remote_dc = 0
    def __init__(self, local_dc='', used_hosts_per_remote_dc=0):
        """
        The `local_dc` parameter should be the name of the datacenter
        (such as is reported by ``nodetool ring``) that should
        be considered local. If not specified, the driver will choose
        a local_dc based on the first host among :attr:`.Cluster.contact_points`
        having a valid DC. If relying on this mechanism, all specified
        contact points should be nodes in a single, local DC.
        `used_hosts_per_remote_dc` controls how many nodes in
        each remote datacenter will have connections opened
        against them. In other words, `used_hosts_per_remote_dc` hosts
        will be considered :attr:`~.HostDistance.REMOTE` and the
        rest will be considered :attr:`~.HostDistance.IGNORED`.
        By default, all remote hosts are ignored.
        """
        self.local_dc = local_dc
        self.used_hosts_per_remote_dc = used_hosts_per_remote_dc
        self._dc_live_hosts = {}
        self._position = 0
        self._contact_points = []
        LoadBalancingPolicy.__init__(self)
    def _dc(self, host):
        return host.datacenter or self.local_dc
    def populate(self, cluster, hosts):
        for dc, dc_hosts in groupby(hosts, lambda h: self._dc(h)):
            self._dc_live_hosts[dc] = tuple(set(dc_hosts))
        if not self.local_dc:
            self._contact_points = cluster.contact_points
        self._position = randint(0, len(hosts) - 1) if hosts else 0
    def distance(self, host):
        dc = self._dc(host)
        if dc == self.local_dc:
            return HostDistance.LOCAL
        if not self.used_hosts_per_remote_dc:
            return HostDistance.IGNORED
        else:
            dc_hosts = self._dc_live_hosts.get(dc)
            if not dc_hosts:
                return HostDistance.IGNORED
            if host in list(dc_hosts)[:self.used_hosts_per_remote_dc]:
                return HostDistance.REMOTE
            else:
                return HostDistance.IGNORED
    def make_query_plan(self, working_keyspace=None, query=None):
        # not thread-safe, but we don't care much about lost increments
        # for the purposes of load balancing
        pos = self._position
        self._position += 1
        local_live = self._dc_live_hosts.get(self.local_dc, ())
        pos = (pos % len(local_live)) if local_live else 0
        for host in islice(cycle(local_live), pos, pos + len(local_live)):
            yield host
        # the dict can change, so get candidate DCs iterating over keys of a copy
        other_dcs = [dc for dc in self._dc_live_hosts.copy().keys() if dc != self.local_dc]
        for dc in other_dcs:
            remote_live = self._dc_live_hosts.get(dc, ())
            for host in remote_live[:self.used_hosts_per_remote_dc]:
                yield host
    def on_up(self, host):
        # not worrying about threads because this will happen during
        # control connection startup/refresh
        if not self.local_dc and host.datacenter:
            if host.address in self._contact_points:
                self.local_dc = host.datacenter
                log.info("Using datacenter '%s' for DCAwareRoundRobinPolicy (via host '%s'); "
                         "if incorrect, please specify a local_dc to the constructor, "
                         "or limit contact points to local cluster nodes" %
                         (self.local_dc, host.address))
                del self._contact_points
        dc = self._dc(host)
        with self._hosts_lock:
            current_hosts = self._dc_live_hosts.get(dc, ())
            if host not in current_hosts:
                self._dc_live_hosts[dc] = current_hosts + (host, )
    def on_down(self, host):
        dc = self._dc(host)
        with self._hosts_lock:
            current_hosts = self._dc_live_hosts.get(dc, ())
            if host in current_hosts:
                hosts = tuple(h for h in current_hosts if h != host)
                if hosts:
                    self._dc_live_hosts[dc] = hosts
                else:
                    del self._dc_live_hosts[dc]
    def on_add(self, host):
        self.on_up(host)
    def on_remove(self, host):
        self.on_down(host)
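# Hedged usage sketch (datacenter names are hypothetical): a DC-aware policy
# that treats 'dc1' as local and keeps connections to at most two hosts per
# remote datacenter; remaining remote hosts are reported as IGNORED.
#     policy = DCAwareRoundRobinPolicy(local_dc='dc1', used_hosts_per_remote_dc=2)
# With used_hosts_per_remote_dc left at its default of 0, every remote host
# is ignored and query plans contain local-DC hosts only.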
class TokenAwarePolicy(LoadBalancingPolicy):
    """
    A :class:`.LoadBalancingPolicy` wrapper that adds token awareness to
    a child policy.
    This alters the child policy's behavior so that it first attempts to
    send queries to :attr:`~.HostDistance.LOCAL` replicas (as determined
    by the child policy) based on the :class:`.Statement`'s
    :attr:`~.Statement.routing_key`.  Once those hosts are exhausted, the
    remaining hosts in the child policy's query plan will be used.
    If no :attr:`~.Statement.routing_key` is set on the query, the child
    policy's query plan will be used as is.
    """
    _child_policy = None
    _cluster_metadata = None
    def __init__(self, child_policy):
        self._child_policy = child_policy
    def populate(self, cluster, hosts):
        self._cluster_metadata = cluster.metadata
        self._child_policy.populate(cluster, hosts)
    def check_supported(self):
        if not self._cluster_metadata.can_support_partitioner():
            raise Exception(
                '%s cannot be used with the cluster partitioner (%s) because '
                'the relevant C extension for this driver was not compiled. '
                'See the installation instructions for details on building '
                'and installing the C extensions.' %
                (self.__class__.__name__, self._cluster_metadata.partitioner))
    def distance(self, *args, **kwargs):
        return self._child_policy.distance(*args, **kwargs)
    def make_query_plan(self, working_keyspace=None, query=None):
        if query and query.keyspace:
            keyspace = query.keyspace
        else:
            keyspace = working_keyspace
        child = self._child_policy
        if query is None:
            for host in child.make_query_plan(keyspace, query):
                yield host
        else:
            routing_key = query.routing_key
            if routing_key is None or keyspace is None:
                for host in child.make_query_plan(keyspace, query):
                    yield host
            else:
                replicas = self._cluster_metadata.get_replicas(keyspace, routing_key)
                for replica in replicas:
                    if replica.is_up and \
                            child.distance(replica) == HostDistance.LOCAL:
                        yield replica
                for host in child.make_query_plan(keyspace, query):
                    # skip if we've already listed this host
                    if host not in replicas or \
                            child.distance(host) == HostDistance.REMOTE:
                        yield host
    def on_up(self, *args, **kwargs):
        return self._child_policy.on_up(*args, **kwargs)
    def on_down(self, *args, **kwargs):
        return self._child_policy.on_down(*args, **kwargs)
    def on_add(self, *args, **kwargs):
        return self._child_policy.on_add(*args, **kwargs)
    def on_remove(self, *args, **kwargs):
        return self._child_policy.on_remove(*args, **kwargs)
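# Hedged usage sketch (names are illustrative): token awareness wraps another
# policy rather than replacing it, so a common combination is
#     policy = TokenAwarePolicy(DCAwareRoundRobinPolicy(local_dc='dc1'))
# The wrapper yields LOCAL replicas for the statement's routing_key first and
# then falls back to the child policy's ordinary query plan.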
class WhiteListRoundRobinPolicy(RoundRobinPolicy):
    """
    A subclass of :class:`.RoundRobinPolicy` which evenly
    distributes queries across all nodes in the cluster,
    regardless of what datacenter the nodes may be in, but
    only if that node exists in the list of allowed nodes.
    This policy addresses the issue described in
    https://datastax-oss.atlassian.net/browse/JAVA-145,
    where connection errors occur when connection
    attempts are made to private IP addresses remotely.
    """
    def __init__(self, hosts):
        """
        The `hosts` parameter should be a sequence of hosts to permit
        connections to.
        """
        self._allowed_hosts = hosts
        RoundRobinPolicy.__init__(self)
    def populate(self, cluster, hosts):
        self._live_hosts = frozenset(h for h in hosts if h.address in self._allowed_hosts)
        if len(hosts) <= 1:
            self._position = 0
        else:
            self._position = randint(0, len(hosts) - 1)
    def distance(self, host):
        if host.address in self._allowed_hosts:
            return HostDistance.LOCAL
        else:
            return HostDistance.IGNORED
    def on_up(self, host):
        if host.address in self._allowed_hosts:
            RoundRobinPolicy.on_up(self, host)
    def on_add(self, host):
        if host.address in self._allowed_hosts:
            RoundRobinPolicy.on_add(self, host)
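# Hedged usage sketch (addresses are hypothetical): the white-list policy
# only ever considers the addresses it was constructed with, e.g.
#     policy = WhiteListRoundRobinPolicy(['10.0.0.1', '10.0.0.2'])
# Hosts outside that list are reported as IGNORED by distance() and are never
# added to the live set by populate(), on_up(), or on_add().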
class ConvictionPolicy(object):
    """
    A policy which decides when hosts should be considered down
    based on the types of failures and the number of failures.
    If custom behavior is needed, this class may be subclassed.
    """
    def __init__(self, host):
        """
        `host` is an instance of :class:`.Host`.
        """
        self.host = host
    def add_failure(self, connection_exc):
        """
        Implementations should return :const:`True` if the host should be
        convicted, :const:`False` otherwise.
        """
        raise NotImplementedError()
    def reset(self):
        """
        Implementations should clear out any convictions or state regarding
        the host.
        """
        raise NotImplementedError()
class SimpleConvictionPolicy(ConvictionPolicy):
    """
    The default implementation of :class:`ConvictionPolicy`,
    which simply marks a host as down after the first failure
    of any kind.
    """
    def add_failure(self, connection_exc):
        return True
    def reset(self):
        pass
class ReconnectionPolicy(object):
    """
    This class and its subclasses govern how frequently an attempt is made
    to reconnect to nodes that are marked as dead.
    If custom behavior is needed, this class may be subclassed.
    """
    def new_schedule(self):
        """
        This should return a finite or infinite iterable of delays (each as a
        floating point number of seconds) between each failed reconnection
        attempt.  Note that if the iterable is finite, reconnection attempts
        will cease once the iterable is exhausted.
        """
        raise NotImplementedError()
class ConstantReconnectionPolicy(ReconnectionPolicy):
    """
    A :class:`.ReconnectionPolicy` subclass which sleeps for a fixed delay
    between each reconnection attempt.
    """
    def __init__(self, delay, max_attempts=64):
        """
        `delay` should be a floating point number of seconds to wait between
        each attempt.
        `max_attempts` should be a total number of attempts to be made before
        giving up, or :const:`None` to continue reconnection attempts forever.
        The default is 64.
        """
        if delay < 0:
            raise ValueError("delay must not be negative")
        if max_attempts < 0:
            raise ValueError("max_attempts must not be negative")
        self.delay = delay
        self.max_attempts = max_attempts
    def new_schedule(self):
        return repeat(self.delay, self.max_attempts)
class ExponentialReconnectionPolicy(ReconnectionPolicy):
    """
    A :class:`.ReconnectionPolicy` subclass which exponentially increases
    the length of the delay between each reconnection attempt up to
    a set maximum delay.
    """
    def __init__(self, base_delay, max_delay):
        """
        `base_delay` and `max_delay` should be in floating point units of
        seconds.
        """
        if base_delay < 0 or max_delay < 0:
            raise ValueError("Delays may not be negative")
        if max_delay < base_delay:
            raise ValueError("Max delay must be greater than base delay")
        self.base_delay = base_delay
        self.max_delay = max_delay
    def new_schedule(self):
        return (min(self.base_delay * (2 ** i), self.max_delay) for i in range(64))
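# Hedged, illustrative sketch: the two reconnection policies above differ only
# in how the delay sequence is produced.  The helper below is never called by
# the driver and uses example values, not defaults.
def _example_reconnection_schedules():
    constant = list(ConstantReconnectionPolicy(delay=2.0, max_attempts=3).new_schedule())
    # constant == [2.0, 2.0, 2.0]
    exponential = list(ExponentialReconnectionPolicy(base_delay=1.0, max_delay=30.0).new_schedule())
    # exponential starts 1, 2, 4, 8, 16, 30, 30, ... and is capped at 64 entries
    return constant, exponential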
class WriteType(object):
    """
    For usage with :class:`.RetryPolicy`, this describes a type
    of write operation.
    """
    SIMPLE = 0
    """
    A write to a single partition key. Such writes are guaranteed to be atomic
    and isolated.
    """
    BATCH = 1
    """
    A write to multiple partition keys that used the distributed batch log to
    ensure atomicity.
    """
    UNLOGGED_BATCH = 2
    """
    A write to multiple partition keys that did not use the distributed batch
    log. Atomicity for such writes is not guaranteed.
    """
    COUNTER = 3
    """
    A counter write (for one or multiple partition keys). Such writes should
    not be replayed in order to avoid overcounting.
    """
    BATCH_LOG = 4
    """
    The initial write to the distributed batch log that Cassandra performs
    internally before a BATCH write.
    """
    CAS = 5
    """
    A lightweight-transaction write, such as "DELETE ... IF EXISTS".
    """
WriteType.name_to_value = {
    'SIMPLE': WriteType.SIMPLE,
    'BATCH': WriteType.BATCH,
    'UNLOGGED_BATCH': WriteType.UNLOGGED_BATCH,
    'COUNTER': WriteType.COUNTER,
    'BATCH_LOG': WriteType.BATCH_LOG,
    'CAS': WriteType.CAS
}
class RetryPolicy(object):
    """
    A policy that describes whether to retry, rethrow, or ignore coordinator
    timeout and unavailable failures. These are failures reported from the
    server side. Timeouts are configured by
    `settings in cassandra.yaml <https://github.com/apache/cassandra/blob/cassandra-2.1.4/conf/cassandra.yaml#L568-L584>`_.
    Unavailable failures occur when the coordinator cannot achieve the consistency
    level for a request. For further information see the method descriptions
    below.
    To specify a default retry policy, set the
    :attr:`.Cluster.default_retry_policy` attribute to an instance of this
    class or one of its subclasses.
    To specify a retry policy per query, set the :attr:`.Statement.retry_policy`
    attribute to an instance of this class or one of its subclasses.
    If custom behavior is needed for retrying certain operations,
    this class may be subclassed.
    """
    RETRY = 0
    """
    This should be returned from the below methods if the operation
    should be retried on the same connection.
    """
    RETHROW = 1
    """
    This should be returned from the below methods if the failure
    should be propagated and no more retries attempted.
    """
    IGNORE = 2
    """
    This should be returned from the below methods if the failure
    should be ignored but no more retries should be attempted.
    """
    def on_read_timeout(self, query, consistency, required_responses,
                        received_responses, data_retrieved, retry_num):
        """
        This is called when a read operation times out from the coordinator's
        perspective (i.e. a replica did not respond to the coordinator in time).
        It should return a tuple with two items: one of the class enums (such
        as :attr:`.RETRY`) and a :class:`.ConsistencyLevel` to retry the
        operation at or :const:`None` to keep the same consistency level.
        `query` is the :class:`.Statement` that timed out.
        `consistency` is the :class:`.ConsistencyLevel` that the operation was
        attempted at.
        The `required_responses` and `received_responses` parameters describe
        how many replicas needed to respond to meet the requested consistency
        level and how many actually did respond before the coordinator timed
        out the request. `data_retrieved` is a boolean indicating whether
        any of those responses contained data (as opposed to just a digest).
        `retry_num` counts how many times the operation has been retried, so
        the first time this method is called, `retry_num` will be 0.
        By default, operations will be retried at most once, and only if
        a sufficient number of replicas responded (with data digests).
        """
        if retry_num != 0:
            return (self.RETHROW, None)
        elif received_responses >= required_responses and not data_retrieved:
            return (self.RETRY, consistency)
        else:
            return (self.RETHROW, None)
    def on_write_timeout(self, query, consistency, write_type,
                         required_responses, received_responses, retry_num):
        """
        This is called when a write operation times out from the coordinator's
        perspective (i.e. a replica did not respond to the coordinator in time).
        `query` is the :class:`.Statement` that timed out.
        `consistency` is the :class:`.ConsistencyLevel` that the operation was
        attempted at.
        `write_type` is one of the :class:`.WriteType` enums describing the
        type of write operation.
        The `required_responses` and `received_responses` parameters describe
        how many replicas needed to acknowledge the write to meet the requested
        consistency level and how many replicas actually did acknowledge the
        write before the coordinator timed out the request.
        `retry_num` counts how many times the operation has been retried, so
        the first time this method is called, `retry_num` will be 0.
        By default, failed write operations will be retried at most once, and
        they will only be retried if the `write_type` was
        :attr:`~.WriteType.BATCH_LOG`.
        """
        if retry_num != 0:
            return (self.RETHROW, None)
        elif write_type == WriteType.BATCH_LOG:
            return (self.RETRY, consistency)
        else:
            return (self.RETHROW, None)
    def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num):
        """
        This is called when the coordinator node determines that a read or
        write operation cannot be successful because the number of live
        replicas are too low to meet the requested :class:`.ConsistencyLevel`.
        This means that the read or write operation was never forwarded to
        any replicas.
        `query` is the :class:`.Statement` that failed.
        `consistency` is the :class:`.ConsistencyLevel` that the operation was
        attempted at.
        `required_replicas` is the number of replicas that would have needed to
        acknowledge the operation to meet the requested consistency level.
        `alive_replicas` is the number of replicas that the coordinator
        considered alive at the time of the request.
        `retry_num` counts how many times the operation has been retried, so
        the first time this method is called, `retry_num` will be 0.
        By default, no retries will be attempted and the error will be re-raised.
        """
        return (self.RETHROW, None)
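# Hedged, illustrative sketch (not part of the driver): a RetryPolicy
# subclass that retries a read timeout once regardless of how many replicas
# responded, while inheriting the default write-timeout and unavailable
# behavior from RetryPolicy.
class _ExampleEagerReadRetryPolicy(RetryPolicy):
    def on_read_timeout(self, query, consistency, required_responses,
                        received_responses, data_retrieved, retry_num):
        if retry_num == 0:
            return (self.RETRY, consistency)
        return (self.RETHROW, None)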
class FallthroughRetryPolicy(RetryPolicy):
    """
    A retry policy that never retries and always propagates failures to
    the application.
    """
    def on_read_timeout(self, *args, **kwargs):
        return (self.RETHROW, None)
    def on_write_timeout(self, *args, **kwargs):
        return (self.RETHROW, None)
    def on_unavailable(self, *args, **kwargs):
        return (self.RETHROW, None)
class DowngradingConsistencyRetryPolicy(RetryPolicy):
    """
    A retry policy that sometimes retries with a lower consistency level than
    the one initially requested.
    **BEWARE**: This policy may retry queries using a lower consistency
    level than the one initially requested. By doing so, it may break
    consistency guarantees. In other words, if you use this retry policy,
    there are cases (documented below) where a read at :attr:`~.QUORUM`
    *may not* see a preceding write at :attr:`~.QUORUM`. Do not use this
    policy unless you have understood the cases where this can happen and
    are ok with that. It is also recommended to subclass this class so
    that queries that required a consistency level downgrade can be
    recorded (so that repairs can be made later, etc).
    This policy implements the same retries as :class:`.RetryPolicy`,
    but on top of that, it also retries in the following cases:
    * On a read timeout: if the number of replicas that responded is
      greater than one but lower than is required by the requested
      consistency level, the operation is retried at a lower consistency
      level.
    * On a write timeout: if the operation is an :attr:`~.UNLOGGED_BATCH`
      and at least one replica acknowledged the write, the operation is
      retried at a lower consistency level.  Furthermore, for other
      write types, if at least one replica acknowledged the write, the
      timeout is ignored.
    * On an unavailable exception: if at least one replica is alive, the
      operation is retried at a lower consistency level.
    The reasoning behind this retry policy is as follows: if, based
    on the information the Cassandra coordinator node returns, retrying the
    operation with the initially requested consistency has a chance to
    succeed, do it. Otherwise, if based on that information we know the
    initially requested consistency level cannot be achieved currently, then:
    * For writes, ignore the exception (thus silently failing the
      consistency requirement) if we know the write has been persisted on at
      least one replica.
    * For reads, try reading at a lower consistency level (thus silently
      failing the consistency requirement).
    In other words, this policy implements the idea that if the requested
    consistency level cannot be achieved, the next best thing for writes is
    to make sure the data is persisted, and that reading something is better
    than reading nothing, even if there is a risk of reading stale data.
    """
    def _pick_consistency(self, num_responses):
        if num_responses >= 3:
            return (self.RETRY, ConsistencyLevel.THREE)
        elif num_responses >= 2:
            return (self.RETRY, ConsistencyLevel.TWO)
        elif num_responses >= 1:
            return (self.RETRY, ConsistencyLevel.ONE)
        else:
            return (self.RETHROW, None)
    def on_read_timeout(self, query, consistency, required_responses,
                        received_responses, data_retrieved, retry_num):
        if retry_num != 0:
            return (self.RETHROW, None)
        elif received_responses < required_responses:
            return self._pick_consistency(received_responses)
        elif not data_retrieved:
            return (self.RETRY, consistency)
        else:
            return (self.RETHROW, None)
    def on_write_timeout(self, query, consistency, write_type,
                         required_responses, received_responses, retry_num):
        if retry_num != 0:
            return (self.RETHROW, None)
        elif write_type in (WriteType.SIMPLE, WriteType.BATCH, WriteType.COUNTER):
            return (self.IGNORE, None)
        elif write_type == WriteType.UNLOGGED_BATCH:
            return self._pick_consistency(received_responses)
        elif write_type == WriteType.BATCH_LOG:
            return (self.RETRY, consistency)
        else:
            return (self.RETHROW, None)
    def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num):
        if retry_num != 0:
            return (self.RETHROW, None)
        else:
            return self._pick_consistency(alive_replicas)
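# Hedged, illustrative sketch: the downgrade decision above maps the number of
# replicas that responded (or are alive) onto the highest consistency level
# those replicas can still satisfy, e.g.
#     policy = DowngradingConsistencyRetryPolicy()
#     policy._pick_consistency(3)  # (RetryPolicy.RETRY, ConsistencyLevel.THREE)
#     policy._pick_consistency(1)  # (RetryPolicy.RETRY, ConsistencyLevel.ONE)
#     policy._pick_consistency(0)  # (RetryPolicy.RETHROW, None)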
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class ProviderOperations(object):
    """ProviderOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: API Version. Constant value: "2016-03-01".
    """
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2016-03-01"
        self.config = config
    def get_available_stacks(
            self, os_type_selected=None, custom_headers=None, raw=False, **operation_config):
        """Get available application frameworks and their versions.
        Get available application frameworks and their versions.
        :param os_type_selected: Possible values include: 'Windows', 'Linux'
        :type os_type_selected: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of ApplicationStack
        :rtype:
         ~azure.mgmt.web.models.ApplicationStackPaged[~azure.mgmt.web.models.ApplicationStack]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = self.get_available_stacks.metadata['url']
                # Construct parameters
                query_parameters = {}
                if os_type_selected is not None:
                    query_parameters['osTypeSelected'] = self._serialize.query("os_type_selected", os_type_selected, 'str')
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.ApplicationStackPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.ApplicationStackPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    get_available_stacks.metadata = {'url': '/providers/Microsoft.Web/availableStacks'}
    def list_operations(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all available operations for the Microsoft.Web resource provider.
        Also exposes resource metric definitions.
        Gets all available operations for the Microsoft.Web resource provider.
        Also exposes resource metric definitions.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of CsmOperationDescription
        :rtype:
         ~azure.mgmt.web.models.CsmOperationDescriptionPaged[~azure.mgmt.web.models.CsmOperationDescription]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = self.list_operations.metadata['url']
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.CsmOperationDescriptionPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.CsmOperationDescriptionPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_operations.metadata = {'url': '/providers/Microsoft.Web/operations'}
    def get_available_stacks_on_prem(
            self, os_type_selected=None, custom_headers=None, raw=False, **operation_config):
        """Get available application frameworks and their versions.
        Get available application frameworks and their versions.
        :param os_type_selected: Possible values include: 'Windows', 'Linux'
        :type os_type_selected: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of ApplicationStack
        :rtype:
         ~azure.mgmt.web.models.ApplicationStackPaged[~azure.mgmt.web.models.ApplicationStack]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = self.get_available_stacks_on_prem.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                if os_type_selected is not None:
                    query_parameters['osTypeSelected'] = self._serialize.query("os_type_selected", os_type_selected, 'str')
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.ApplicationStackPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.ApplicationStackPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    get_available_stacks_on_prem.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/availableStacks'}
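# Hedged usage sketch (client/config/serializer wiring is assumed to already
# exist): each operation above returns a lazily paged collection, so callers
# iterate it directly and internal_paging follows next_link for extra pages:
#     ops = ProviderOperations(client, config, serializer, deserializer)
#     for stack in ops.get_available_stacks(os_type_selected='Linux'):
#         ...  # each item is a deserialized ApplicationStack
# Passing raw=True returns the paged object prepared for raw-response
# inspection instead of the plain deserialized iterator.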
from __future__ import division
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from builtins import range
import time
import math
import mock
import pytest
from jaeger_client.sampler import (
    Sampler,
    ConstSampler,
    ProbabilisticSampler,
    RateLimitingSampler,
    RemoteControlledSampler,
    GuaranteedThroughputProbabilisticSampler,
    AdaptiveSampler,
    DEFAULT_SAMPLING_PROBABILITY,
    get_sampling_probability,
    get_rate_limit,
)
MAX_INT = 1 << 63
def get_tags(type, param):
    return {
        'sampler.type': type,
        'sampler.param': param,
    }
def test_abstract_sampler_errors():
    sampler = Sampler()
    with pytest.raises(NotImplementedError):
        sampler.is_sampled(trace_id=123)
    with pytest.raises(NotImplementedError):
        sampler.close()
def test_probabilistic_sampler_errors():
    with pytest.raises(AssertionError):
        ProbabilisticSampler(-0.1)
    with pytest.raises(AssertionError):
        ProbabilisticSampler(1.1)
def test_probabilistic_sampler():
    sampler = ProbabilisticSampler(0.5)
    assert MAX_INT == 0x8000000000000000
    sampled, tags = sampler.is_sampled(MAX_INT-10)
    assert sampled
    assert tags == get_tags('probabilistic', 0.5)
    sampled, _ = sampler.is_sampled(MAX_INT+10)
    assert not sampled
    sampler.close()
    assert '%s' % sampler == 'ProbabilisticSampler(0.5)'
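# Hedged note (inferred from the assertions above rather than from the sampler
# implementation): with a rate of 0.5 the sampling boundary appears to sit at
# MAX_INT = 2**63, so trace ids just below it are sampled and ids just above
# it are not -- consistent with a threshold of rate * 2**64 applied to 64-bit
# trace ids.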
def test_const_sampler():
    sampler = ConstSampler(True)
    sampled, _ = sampler.is_sampled(1)
    assert sampled
    sampled, _ = sampler.is_sampled(MAX_INT)
    assert sampled
    sampler = ConstSampler(False)
    sampled, tags = sampler.is_sampled(1)
    assert not sampled
    sampled, tags = sampler.is_sampled(MAX_INT)
    assert not sampled
    assert tags == get_tags('const', False)
    assert '%s' % sampler == 'ConstSampler(False)'
def test_rate_limiting_sampler():
    sampler = RateLimitingSampler(2)
    # stop time by overwriting timestamp() function to always return
    # the same time
    ts = time.time()
    sampler.rate_limiter.last_tick = ts
    with mock.patch('jaeger_client.rate_limiter.RateLimiter.timestamp') \
            as mock_time:
        mock_time.side_effect = lambda: ts  # always return same time
        assert sampler.rate_limiter.timestamp() == ts
        sampled, _ = sampler.is_sampled(0)
        assert sampled, 'initial balance allows first item'
        sampled, _ = sampler.is_sampled(0)
        assert sampled, 'initial balance allows second item'
        sampled, _ = sampler.is_sampled(0)
        assert not sampled, 'initial balance exhausted'
        # move time 250ms forward, not enough credits to pay for one sample
        mock_time.side_effect = lambda: ts + 0.25
        sampled, _ = sampler.is_sampled(0)
        assert not sampled, 'not enough time passed for full item'
        # move time 500ms forward, now enough credits to pay for one sample
        mock_time.side_effect = lambda: ts + 0.5
        sampled, _ = sampler.is_sampled(0)
        assert sampled, 'enough time for new item'
        sampled, _ = sampler.is_sampled(0)
        assert not sampled, 'no more balance'
        # move time 5s forward, enough to accumulate credits for 10 samples,
        # but it should still be capped at 2
        sampler.last_tick = ts  # reset the timer
        mock_time.side_effect = lambda: ts + 5
        sampled, _ = sampler.is_sampled(0)
        assert sampled, 'enough time for new item'
        sampled, _ = sampler.is_sampled(0)
        assert sampled, 'enough time for second new item'
        for i in range(0, 8):
            sampled, tags = sampler.is_sampled(0)
            assert not sampled, 'but no further, since time is stopped'
        assert tags == get_tags('ratelimiting', 2)
    sampler.close()
    assert '%s' % sampler == 'RateLimitingSampler(2)'
    # Test with a rate limit below one trace per second (0.1/s, i.e. one sample every ~10 seconds)
    sampler = RateLimitingSampler(0.1)
    ts = time.time()
    sampler.rate_limiter.last_tick = ts
    with mock.patch('jaeger_client.rate_limiter.RateLimiter.timestamp') \
            as mock_time:
        mock_time.side_effect = lambda: ts  # always return same time
        assert sampler.rate_limiter.timestamp() == ts
        sampled, _ = sampler.is_sampled(0)
        assert sampled, 'initial balance allows first item'
        sampled, _ = sampler.is_sampled(0)
        assert not sampled, 'initial balance exhausted'
        # move time 11s forward, enough credits to pay for one sample
        mock_time.side_effect = lambda: ts + 11
        sampled, _ = sampler.is_sampled(0)
        assert sampled
    sampler.close()
    assert '%s' % sampler == 'RateLimitingSampler(0.1)'
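# The sequence above describes a credit (token bucket) scheme: credits accrue
# at `max_traces_per_second`, the balance is capped at the configured maximum,
# and each sampled trace spends one credit. A minimal sketch of that
# bookkeeping, under those assumptions (not the RateLimiter implementation):
class _SketchTokenBucket(object):
    def __init__(self, credits_per_second, max_balance):
        self.credits_per_second = credits_per_second
        self.max_balance = max_balance
        self.balance = max_balance  # start with a full initial balance
        self.last_tick = time.time()

    def check_credit(self, cost=1.0, now=None):
        """Accrue credits since last_tick, then spend `cost` if the balance allows."""
        now = time.time() if now is None else now
        elapsed = now - self.last_tick
        self.balance = min(self.max_balance,
                           self.balance + elapsed * self.credits_per_second)
        self.last_tick = now
        if self.balance >= cost:
            self.balance -= cost
            return True
        return False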
def test_guaranteed_throughput_probabilistic_sampler():
    sampler = GuaranteedThroughputProbabilisticSampler('op', 2, 0.5)
    sampled, tags = sampler.is_sampled(MAX_INT-10)
    assert sampled
    assert tags == get_tags('probabilistic', 0.5)
    sampled, tags = sampler.is_sampled(MAX_INT+10)
    assert sampled
    assert tags == get_tags('lowerbound', 0.5)
    sampled, _ = sampler.is_sampled(MAX_INT+10)
    assert not sampled
    assert '%s' % sampler == 'GuaranteedThroughputProbabilisticSampler(op, 0.5, 2)'
    sampler.update(3, 0.51)
    sampled, tags = sampler.is_sampled(MAX_INT-10)
    assert sampled
    assert tags == get_tags('probabilistic', 0.51)
    sampled, tags = sampler.is_sampled(MAX_INT+(MAX_INT/4))
    assert sampled
    assert tags == get_tags('lowerbound', 0.51)
    assert '%s' % sampler == 'GuaranteedThroughputProbabilisticSampler(op, 0.51, 3)'
    sampler.close()
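# The assertions above capture the composition this sampler is named for: the
# probabilistic decision wins when it fires (tagged 'probabilistic'); otherwise
# a rate-limited lower bound may still sample the trace (tagged 'lowerbound'),
# and a probabilistic hit plausibly also consumes a lower-bound credit, which
# is consistent with the third call above being rejected. A rough sketch of
# that ordering (illustrative, not the library code):
def _sketch_guaranteed_throughput(prob_sampler, lower_bound_sampler, trace_id, rate):
    sampled, _ = prob_sampler.is_sampled(trace_id)
    if sampled:
        lower_bound_sampler.is_sampled(trace_id)  # keep the credit bookkeeping in step
        return True, get_tags('probabilistic', rate)
    sampled, _ = lower_bound_sampler.is_sampled(trace_id)
    return sampled, get_tags('lowerbound', rate)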
def test_adaptive_sampler():
    strategies = {
        "defaultSamplingProbability":0.51,
        "defaultLowerBoundTracesPerSecond":3,
        "perOperationStrategies":
        [
            {
                "operation":"op",
                "probabilisticSampling":{
                    "samplingRate":0.5
                }
            }
        ]
    }
    sampler = AdaptiveSampler(strategies, 2)
    sampled, tags = sampler.is_sampled(MAX_INT-10, 'op')
    assert sampled
    assert tags == get_tags('probabilistic', 0.5)
    # This operation is seen for the first time by the sampler
    sampled, tags = sampler.is_sampled(MAX_INT-10, "new_op")
    assert sampled
    assert tags == get_tags('probabilistic', 0.51)
    sampled, tags = sampler.is_sampled(MAX_INT+(MAX_INT/4), "new_op")
    assert sampled
    assert tags == get_tags('lowerbound', 0.51)
    # This operation is seen for the first time by the sampler but surpasses
    # max_operations of 2. The default probabilistic sampler will be used
    sampled, tags = sampler.is_sampled(MAX_INT-10, "new_op_2")
    assert sampled
    assert tags == get_tags('probabilistic', 0.51)
    sampled, _ = sampler.is_sampled(MAX_INT+(MAX_INT/4), "new_op_2")
    assert not sampled
    assert '%s' % sampler == 'AdaptiveSampler(0.51, 3, 2)'
    # Update the strategies
    strategies = {
        "defaultSamplingProbability":0.52,
        "defaultLowerBoundTracesPerSecond":4,
        "perOperationStrategies":
        [
            {
                "operation":"op",
                "probabilisticSampling":{
                    "samplingRate":0.52
                }
            },
            {
                "operation":"new_op_3",
                "probabilisticSampling":{
                    "samplingRate":0.53
                }
            }
        ]
    }
    sampler.update(strategies)
    # The probability for op has been updated
    sampled, tags = sampler.is_sampled(MAX_INT-10, 'op')
    assert sampled
    assert tags == get_tags('probabilistic', 0.52)
    # A new operation has been added
    sampled, tags = sampler.is_sampled(MAX_INT-10, 'new_op_3')
    assert sampled
    assert tags == get_tags('probabilistic', 0.53)
    assert '%s' % sampler == 'AdaptiveSampler(0.52, 4, 2)'
    sampler.close()
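# test_adaptive_sampler pins down the per-operation bookkeeping: a known
# operation uses its own strategy, an unseen operation gets a sampler built
# from the defaults, and once `max_operations` samplers exist, further new
# operations fall back to the default probabilistic rate. A schematic of that
# lookup, under those assumptions (not the AdaptiveSampler implementation):
def _sketch_adaptive_lookup(samplers, operation, max_operations,
                            default_probability, default_lower_bound):
    sampler = samplers.get(operation)
    if sampler is None and len(samplers) < max_operations:
        sampler = GuaranteedThroughputProbabilisticSampler(
            operation, default_lower_bound, default_probability)
        samplers[operation] = sampler
    if sampler is None:
        # Over the cap: use a plain probabilistic decision at the default rate.
        return ProbabilisticSampler(default_probability)
    return sampler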
def test_adaptive_sampler_default_values():
    adaptive_sampler = AdaptiveSampler({}, 2)
    assert '%s' % adaptive_sampler == 'AdaptiveSampler(0.001, 0.00166666666667, 2)', 'sampler should use default values'
    sampled, tags = adaptive_sampler.is_sampled(0, 'op')
    assert sampled
    assert tags == get_tags('probabilistic', 0.001), 'should use default probability'
    assert '%s' % adaptive_sampler.samplers['op'] == 'GuaranteedThroughputProbabilisticSampler(op, 0.001, 0.00166666666667)'
    adaptive_sampler.update(strategies = {
        "defaultLowerBoundTracesPerSecond":4,
        "perOperationStrategies":
            [
                {
                    "operation":"new_op",
                    "probabilisticSampling":{
                        "samplingRate":0.002
                    }
                }
            ]
    })
    assert '%s' % adaptive_sampler == 'AdaptiveSampler(0.001, 4, 2)'
    sampled, tags = adaptive_sampler.is_sampled(0, 'new_op')
    assert sampled
    assert tags == get_tags('probabilistic', 0.002)
    assert '%s' % adaptive_sampler.samplers['new_op'] == 'GuaranteedThroughputProbabilisticSampler(new_op, 0.002, 4)'
    sampled, tags = adaptive_sampler.is_sampled(0, 'op')
    assert sampled
    assert tags == get_tags('probabilistic', 0.001)
    # TODO: the lower bound isn't updated when the operation isn't included in perOperationStrategies
    assert '%s' % adaptive_sampler.samplers['op'] == 'GuaranteedThroughputProbabilisticSampler(op, 0.001, 0.00166666666667)'
def test_sampler_equality():
    const1 = ConstSampler(True)
    const2 = ConstSampler(True)
    const3 = ConstSampler(False)
    assert const1 == const2
    assert const1 != const3
    prob1 = ProbabilisticSampler(rate=0.01)
    prob2 = ProbabilisticSampler(rate=0.01)
    prob3 = ProbabilisticSampler(rate=0.02)
    assert prob1 == prob2
    assert prob1 != prob3
    assert const1 != prob1
    rate1 = RateLimitingSampler(max_traces_per_second=0.01)
    rate2 = RateLimitingSampler(max_traces_per_second=0.01)
    rate3 = RateLimitingSampler(max_traces_per_second=0.02)
    assert rate1 == rate2
    assert rate1 != rate3
    assert rate1 != const1
    assert rate1 != prob1
def test_remotely_controlled_sampler():
    sampler = RemoteControlledSampler(
        channel=mock.MagicMock(),
        service_name='x'
    )
    sampled, tags = sampler.is_sampled(1)
    assert sampled
    assert tags == get_tags('probabilistic', DEFAULT_SAMPLING_PROBABILITY)
    init_sampler = mock.MagicMock()
    init_sampler.is_sampled = mock.MagicMock()
    channel = mock.MagicMock()
    channel.io_loop = None
    sampler = RemoteControlledSampler(
        channel=channel,
        service_name='x',
        init_sampler=init_sampler,
        logger=mock.MagicMock(),
    )
    assert init_sampler.is_sampled.call_count == 1
    sampler.is_sampled(1)
    assert init_sampler.is_sampled.call_count == 2
    sampler.io_loop = mock.MagicMock()
    # noinspection PyProtectedMember
    sampler._init_polling()
    # noinspection PyProtectedMember
    sampler._delayed_polling()
    sampler.close()
# noinspection PyProtectedMember
def test_sampling_request_callback():
    channel = mock.MagicMock()
    channel.io_loop = mock.MagicMock()
    error_reporter = mock.MagicMock()
    error_reporter.error = mock.MagicMock()
    sampler = RemoteControlledSampler(
        channel=channel,
        service_name='x',
        error_reporter=error_reporter,
        max_operations=10,
    )
    return_value = mock.MagicMock()
    return_value.exception = lambda *args: False
    probabilistic_strategy = """
    {
        "strategyType":0,
        "probabilisticSampling":
        {
            "samplingRate":0.002
        }
    }
    """
    return_value.result = lambda *args: \
        type('obj', (object,), {'body': probabilistic_strategy})()
    sampler._sampling_request_callback(return_value)
    assert '%s' % sampler.sampler == 'ProbabilisticSampler(0.002)', 'sampler should have changed to probabilistic'
    prev_sampler = sampler.sampler
    sampler._sampling_request_callback(return_value)
    assert prev_sampler is sampler.sampler, "strategy hasn't changed so sampler should not change"
    adaptive_sampling_strategy = """
    {
        "strategyType":0,
        "operationSampling":
        {
            "defaultSamplingProbability":0.001,
            "defaultLowerBoundTracesPerSecond":2,
            "perOperationStrategies":
            [
                {
                    "operation":"op",
                    "probabilisticSampling":{
                        "samplingRate":0.002
                    }
                }
            ]
        }
    }
    """
    return_value.result = lambda *args: \
        type('obj', (object,), {'body': adaptive_sampling_strategy})()
    sampler._sampling_request_callback(return_value)
    assert '%s' % sampler.sampler == 'AdaptiveSampler(0.001, 2, 10)', 'sampler should have changed to adaptive'
    prev_sampler = sampler.sampler
    sampler._sampling_request_callback(return_value)
    assert prev_sampler is sampler.sampler, "strategy hasn't changed so sampler should not change"
    return_value.exception = lambda *args: True
    sampler._sampling_request_callback(return_value)
    assert error_reporter.error.call_count == 1
    assert prev_sampler is sampler.sampler, 'error fetching strategy should not update the sampler'
    return_value.exception = lambda *args: False
    return_value.result = lambda *args: type('obj', (object,), {'body': 'bad_json'})()
    sampler._sampling_request_callback(return_value)
    assert error_reporter.error.call_count == 2
    assert prev_sampler is sampler.sampler, 'error updating sampler should not update the sampler'
    return_value.result = lambda *args: \
        type('obj', (object,), {'body': probabilistic_strategy})()
    sampler._sampling_request_callback(return_value)
    assert '%s' % sampler.sampler == 'ProbabilisticSampler(0.002)', 'updating sampler from adaptive to probabilistic should work'
    sampler.close()
probabilistic_sampler = ProbabilisticSampler(0.002)
other_probabilistic_sampler = ProbabilisticSampler(0.003)
rate_limiting_sampler = RateLimitingSampler(10)
other_rate_limiting_sampler = RateLimitingSampler(20)
@pytest.mark.parametrize("response,init_sampler,expected_sampler,err_count,err_msg,reference_equivalence", [
    (
        {"strategyType":0,"probabilisticSampling":{"samplingRate":0.003}},
        probabilistic_sampler,
        other_probabilistic_sampler,
        0,
        'sampler should update to new probabilistic sampler',
        False,
    ),
    (
        {"strategyType":0,"probabilisticSampling":{"samplingRate":400}},
        probabilistic_sampler,
        probabilistic_sampler,
        1,
        'sampler should remain the same if strategy is invalid',
        True,
    ),
    (
        {"strategyType":0,"probabilisticSampling":{"samplingRate":0.002}},
        probabilistic_sampler,
        probabilistic_sampler,
        0,
        'sampler should remain the same with the same strategy',
        True,
    ),
    (
        {"strategyType":1,"rateLimitingSampling":{"maxTracesPerSecond":10}},
        probabilistic_sampler,
        rate_limiting_sampler,
        0,
        'sampler should update to new rate limiting sampler',
        False,
    ),
    (
        {"strategyType":1,"rateLimitingSampling":{"maxTracesPerSecond":10}},
        rate_limiting_sampler,
        rate_limiting_sampler,
        0,
        'sampler should remain the same with the same strategy',
        True,
    ),
    (
        {"strategyType":1,"rateLimitingSampling":{"maxTracesPerSecond":-10}},
        rate_limiting_sampler,
        rate_limiting_sampler,
        1,
        'sampler should remain the same if strategy is invalid',
        True,
    ),
    (
        {"strategyType":1,"rateLimitingSampling":{"maxTracesPerSecond":20}},
        rate_limiting_sampler,
        other_rate_limiting_sampler,
        0,
        'sampler should update to new rate limiting sampler',
        False,
    ),
    (
        {},
        rate_limiting_sampler,
        rate_limiting_sampler,
        1,
        'sampler should remain the same if strategy is empty',
        True,
    ),
    (
        {"strategyType":2},
        rate_limiting_sampler,
        rate_limiting_sampler,
        1,
        'sampler should remain the same if strategy is invalid',
        True,
    ),
])
def test_update_sampler(response, init_sampler, expected_sampler, err_count, err_msg, reference_equivalence):
    error_reporter = mock.MagicMock()
    error_reporter.error = mock.MagicMock()
    remote_sampler = RemoteControlledSampler(
        channel=mock.MagicMock(),
        service_name='x',
        error_reporter=error_reporter,
        max_operations=10,
        init_sampler=init_sampler,
    )
    # noinspection PyProtectedMember
    remote_sampler._update_sampler(response)
    assert error_reporter.error.call_count == err_count
    if reference_equivalence:
        assert remote_sampler.sampler is expected_sampler, err_msg
    else:
        assert remote_sampler.sampler == expected_sampler, err_msg
    remote_sampler.close()
# noinspection PyProtectedMember
def test_update_sampler_adaptive_sampler():
    error_reporter = mock.MagicMock()
    error_reporter.error = mock.MagicMock()
    remote_sampler = RemoteControlledSampler(
        channel=mock.MagicMock(),
        service_name='x',
        error_reporter=error_reporter,
        max_operations=10,
    )
    response = {
        "strategyType":1,
        "operationSampling":
        {
            "defaultSamplingProbability":0.001,
            "defaultLowerBoundTracesPerSecond":2,
            "perOperationStrategies":
            [
                {
                    "operation":"op",
                    "probabilisticSampling":{
                        "samplingRate":0.002
                    }
                }
            ]
        }
    }
    remote_sampler._update_sampler(response)
    assert '%s' % remote_sampler.sampler == 'AdaptiveSampler(0.001, 2, 10)'
    new_response = {
        "strategyType":1,
        "operationSampling":
        {
            "defaultSamplingProbability":0.51,
            "defaultLowerBoundTracesPerSecond":3,
            "perOperationStrategies":
            [
                {
                    "operation":"op",
                    "probabilisticSampling":{
                        "samplingRate":0.002
                    }
                }
            ]
        }
    }
    remote_sampler._update_sampler(new_response)
    assert '%s' % remote_sampler.sampler == 'AdaptiveSampler(0.51, 3, 10)'
    remote_sampler._update_sampler({"strategyType":0,"probabilisticSampling":{"samplingRate":0.004}})
    assert '%s' % remote_sampler.sampler == 'ProbabilisticSampler(0.004)', \
        'should not fail going from adaptive sampler to probabilistic sampler'
    remote_sampler._update_sampler({"strategyType":1,"operationSampling":{"defaultSamplingProbability":0.4}})
    assert '%s' % remote_sampler.sampler == 'AdaptiveSampler(0.4, 0.00166666666667, 10)'
    remote_sampler.close()
@pytest.mark.parametrize("strategy,expected", [
    ({"probabilisticSampling":{"samplingRate":0.003}}, 0.003),
    ({}, 0.001),
    (None, 0.001),
    ({"probabilisticSampling":{}}, 0.001),
    ({"probabilisticSampling":None}, 0.001),
])
def test_get_sampling_probability(strategy, expected):
    assert expected == get_sampling_probability(strategy)
@pytest.mark.parametrize("strategy,expected", [
    ({"rateLimitingSampling":{"maxTracesPerSecond":1}}, 1),
    ({}, 0.0016666),
    (None, 0.0016666),
    ({"rateLimitingSampling":{}}, 0.0016666),
    ({"rateLimitingSampling":None}, 0.0016666),
])
def test_get_rate_limit(strategy, expected):
    assert math.fabs(expected - get_rate_limit(strategy)) < 0.0001
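# The two parametrized tests above pin the fallback behaviour of the strategy
# helpers: a missing or partial strategy collapses to the default probability
# (0.001) and to a default rate limit of roughly one trace per ten minutes
# (1/600 ~= 0.0016666 traces per second). A minimal sketch of such a getter,
# assuming plain nested dicts (illustrative, not the library code):
def _sketch_get_sampling_probability(strategy, default=0.001):
    if not strategy:
        return default
    probabilistic = strategy.get('probabilisticSampling') or {}
    return probabilistic.get('samplingRate', default)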
# Copyright 2012 Nebula, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
from cinderclient.v2 import availability_zones
from cinderclient.v2 import cgsnapshots
from cinderclient.v2 import consistencygroups
from cinderclient.v2.contrib import list_extensions as cinder_list_extensions
from cinderclient.v2 import pools
from cinderclient.v2 import qos_specs
from cinderclient.v2 import quotas
from cinderclient.v2 import services
from cinderclient.v2 import volume_backups as vol_backups
from cinderclient.v2 import volume_encryption_types as vol_enc_types
from cinderclient.v2 import volume_snapshots as vol_snaps
from cinderclient.v2 import volume_transfers
from cinderclient.v2 import volume_type_access
from cinderclient.v2 import volume_types
from cinderclient.v2 import volumes
from cinderclient.v3 import group_snapshots
from cinderclient.v3 import group_types
from cinderclient.v3 import groups
from openstack_dashboard import api
from openstack_dashboard.api import cinder as cinder_api
from openstack_dashboard.test.test_data import utils
from openstack_dashboard.usage import quotas as usage_quotas
def data(TEST):
    TEST.cinder_services = utils.TestDataContainer()
    TEST.cinder_volumes = utils.TestDataContainer()
    TEST.cinder_volume_backups = utils.TestDataContainer()
    TEST.cinder_volume_encryption_types = utils.TestDataContainer()
    TEST.cinder_volume_types = utils.TestDataContainer()
    TEST.cinder_type_access = utils.TestDataContainer()
    TEST.cinder_volume_encryption = utils.TestDataContainer()
    TEST.cinder_bootable_volumes = utils.TestDataContainer()
    TEST.cinder_qos_specs = utils.TestDataContainer()
    TEST.cinder_qos_spec_associations = utils.TestDataContainer()
    TEST.cinder_volume_snapshots = utils.TestDataContainer()
    TEST.cinder_extensions = utils.TestDataContainer()
    TEST.cinder_quotas = utils.TestDataContainer()
    TEST.cinder_quota_usages = utils.TestDataContainer()
    TEST.cinder_availability_zones = utils.TestDataContainer()
    TEST.cinder_volume_transfers = utils.TestDataContainer()
    TEST.cinder_pools = utils.TestDataContainer()
    TEST.cinder_consistencygroups = utils.TestDataContainer()
    TEST.cinder_cgroup_volumes = utils.TestDataContainer()
    TEST.cinder_cg_snapshots = utils.TestDataContainer()
    TEST.cinder_groups = utils.TestDataContainer()
    TEST.cinder_group_types = utils.TestDataContainer()
    TEST.cinder_group_snapshots = utils.TestDataContainer()
    TEST.cinder_group_volumes = utils.TestDataContainer()
    TEST.cinder_volume_snapshots_with_groups = utils.TestDataContainer()
    # Services
    service_1 = services.Service(services.ServiceManager(None), {
        "service": "cinder-scheduler",
        "status": "enabled",
        "binary": "cinder-scheduler",
        "zone": "internal",
        "state": "up",
        "updated_at": "2013-07-08T05:21:00.000000",
        "host": "devstack001",
        "disabled_reason": None
    })
    service_2 = services.Service(services.ServiceManager(None), {
        "service": "cinder-volume",
        "status": "enabled",
        "binary": "cinder-volume",
        "zone": "nova",
        "state": "up",
        "updated_at": "2013-07-08T05:20:51.000000",
        "host": "devstack001",
        "disabled_reason": None
    })
    TEST.cinder_services.add(service_1)
    TEST.cinder_services.add(service_2)
    # Volumes - Cinder v1
    volume = volumes.Volume(
        volumes.VolumeManager(None),
        {'id': "11023e92-8008-4c8b-8059-7f2293ff3887",
         'status': 'available',
         'size': 40,
         'name': 'Volume name',
         'display_description': 'Volume description',
         'created_at': '2014-01-27 10:30:00',
         'volume_type': None,
         'attachments': []})
    nameless_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "4b069dd0-6eaa-4272-8abc-5448a68f1cce",
         "status": 'available',
         "size": 10,
         "name": '',
         "display_description": '',
         "device": "/dev/hda",
         "created_at": '2010-11-21 18:34:25',
         "volume_type": 'vol_type_1',
         "attachments": []})
    other_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {'id': "21023e92-8008-1234-8059-7f2293ff3889",
         'status': 'in-use',
         'size': 10,
         'name': u'my_volume',
         'display_description': '',
         'created_at': '2013-04-01 10:30:00',
         'volume_type': None,
         'attachments': [{"id": "1", "server_id": '1',
                          "device": "/dev/hda"}]})
    volume_with_type = volumes.Volume(
        volumes.VolumeManager(None),
        {'id': "7dcb47fd-07d9-42c2-9647-be5eab799ebe",
         'status': 'in-use',
         'size': 10,
         'name': u'my_volume2',
         'display_description': '',
         'created_at': '2013-04-01 10:30:00',
         'volume_type': 'vol_type_2',
         'attachments': [{"id": "2", "server_id": '2',
                          "device": "/dev/hdb"}]})
    non_bootable_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {'id': "21023e92-8008-1234-8059-7f2293ff3890",
         'status': 'in-use',
         'size': 10,
         'name': u'my_volume',
         'display_description': '',
         'created_at': '2013-04-01 10:30:00',
         'volume_type': None,
         'bootable': False,
         'attachments': [{"id": "1", "server_id": '1',
                          "device": "/dev/hda"}]})
    volume.bootable = 'true'
    nameless_volume.bootable = 'true'
    other_volume.bootable = 'true'
    TEST.cinder_volumes.add(api.cinder.Volume(volume))
    TEST.cinder_volumes.add(api.cinder.Volume(nameless_volume))
    TEST.cinder_volumes.add(api.cinder.Volume(other_volume))
    TEST.cinder_volumes.add(api.cinder.Volume(volume_with_type))
    TEST.cinder_bootable_volumes.add(api.cinder.Volume(non_bootable_volume))
    vol_type1 = volume_types.VolumeType(
        volume_types.VolumeTypeManager(None),
        {'id': u'1',
         'name': u'vol_type_1',
         'description': 'type 1 description',
         'extra_specs': {'foo': 'bar',
                         'volume_backend_name': 'backend_1'}})
    vol_type2 = volume_types.VolumeType(
        volume_types.VolumeTypeManager(None),
        {'id': u'2',
         'name': u'vol_type_2',
         'description': 'type 2 description'})
    vol_type3 = volume_types.VolumeType(
        volume_types.VolumeTypeManager(None),
        {'id': u'3',
         'name': u'vol_type_3',
         'is_public': False,
         'description': 'type 3 description'})
    TEST.cinder_volume_types.add(vol_type1, vol_type2, vol_type3)
    vol_type_access1 = volume_type_access.VolumeTypeAccess(
        volume_type_access.VolumeTypeAccessManager(None),
        {'volume_type_id': u'1', 'project_id': u'1'})
    TEST.cinder_type_access.add(vol_type_access1)
    # Volumes - Cinder v2
    volume_v2 = volumes.Volume(
        volumes.VolumeManager(None),
        {'id': "31023e92-8008-4c8b-8059-7f2293ff1234",
         'name': 'v2_volume',
         'description': "v2 Volume Description",
         'status': 'available',
         'size': 20,
         'created_at': '2014-01-27 10:30:00',
         'volume_type': None,
         'os-vol-host-attr:host': 'host@backend-name#pool',
         'bootable': 'true',
         'attachments': []})
    volume_v2.bootable = 'true'
    TEST.cinder_volumes.add(api.cinder.Volume(volume_v2))
    snapshot = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None),
        {'id': '5f3d1c33-7d00-4511-99df-a2def31f3b5d',
         'display_name': 'test snapshot',
         'display_description': 'volume snapshot',
         'size': 40,
         'status': 'available',
         'volume_id': '11023e92-8008-4c8b-8059-7f2293ff3887'})
    snapshot2 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None),
        {'id': 'c9d0881a-4c0b-4158-a212-ad27e11c2b0f',
         'name': '',
         'description': 'v2 volume snapshot description',
         'size': 80,
         'status': 'available',
         'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'})
    snapshot3 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None),
        {'id': 'c9d0881a-4c0b-4158-a212-ad27e11c2b0e',
         'name': '',
         'description': 'v2 volume snapshot description 2',
         'size': 80,
         'status': 'available',
         'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'})
    snapshot4 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None),
        {'id': 'cd6be1eb-82ca-4587-8036-13c37c00c2b1',
         'name': '',
         'description': 'v2 volume snapshot with metadata description',
         'size': 80,
         'status': 'available',
         'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234',
         'metadata': {'snapshot_meta_key': 'snapshot_meta_value'}})
    snapshot.bootable = 'true'
    snapshot2.bootable = 'true'
    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot))
    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot2))
    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot3))
    TEST.cinder_volume_snapshots.add(api.cinder.VolumeSnapshot(snapshot4))
    TEST.cinder_volume_snapshots.first()._volume = volume
    # Volume Type Encryption
    vol_enc_type1 = vol_enc_types.VolumeEncryptionType(
        vol_enc_types.VolumeEncryptionTypeManager(None),
        {'volume_type_id': u'1',
         'control_location': "front-end",
         'key_size': 512,
         'provider': "a-provider",
         'cipher': "a-cipher"})
    vol_enc_type2 = vol_enc_types.VolumeEncryptionType(
        vol_enc_types.VolumeEncryptionTypeManager(None),
        {'volume_type_id': u'2',
         'control_location': "front-end",
         'key_size': 256,
         'provider': "a-provider",
         'cipher': "a-cipher"})
    vol_unenc_type1 = vol_enc_types.VolumeEncryptionType(
        vol_enc_types.VolumeEncryptionTypeManager(None), {})
    TEST.cinder_volume_encryption_types.add(vol_enc_type1, vol_enc_type2,
                                            vol_unenc_type1)
    volume_backup1 = vol_backups.VolumeBackup(
        vol_backups.VolumeBackupManager(None),
        {'id': 'a374cbb8-3f99-4c3f-a2ef-3edbec842e31',
         'name': 'backup1',
         'description': 'volume backup 1',
         'size': 10,
         'status': 'available',
         'container_name': 'volumebackups',
         'volume_id': '11023e92-8008-4c8b-8059-7f2293ff3887'})
    volume_backup2 = vol_backups.VolumeBackup(
        vol_backups.VolumeBackupManager(None),
        {'id': 'c321cbb8-3f99-4c3f-a2ef-3edbec842e52',
         'name': 'backup2',
         'description': 'volume backup 2',
         'size': 20,
         'status': 'available',
         'container_name': 'volumebackups',
         'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'})
    volume_backup3 = vol_backups.VolumeBackup(
        vol_backups.VolumeBackupManager(None),
        {'id': 'c321cbb8-3f99-4c3f-a2ef-3edbec842e53',
         'name': 'backup3',
         'description': 'volume backup 3',
         'size': 20,
         'status': 'available',
         'container_name': 'volumebackups',
         'volume_id': '31023e92-8008-4c8b-8059-7f2293ff1234'})
    TEST.cinder_volume_backups.add(volume_backup1)
    TEST.cinder_volume_backups.add(volume_backup2)
    TEST.cinder_volume_backups.add(volume_backup3)
    # Volume Encryption
    vol_enc_metadata1 = volumes.Volume(
        volumes.VolumeManager(None),
        {'cipher': 'test-cipher',
         'key_size': 512,
         'provider': 'test-provider',
         'control_location': 'front-end'})
    vol_unenc_metadata1 = volumes.Volume(
        volumes.VolumeManager(None),
        {})
    TEST.cinder_volume_encryption.add(vol_enc_metadata1)
    TEST.cinder_volume_encryption.add(vol_unenc_metadata1)
    # v2 extensions
    extensions = [
        {'alias': 'os-services',
         'description': 'Services support.',
         'links': '[]',
         'name': 'Services',
         'updated': '2012-10-28T00:00:00-00:00'},
        {'alias': 'os-admin-actions',
         'description': 'Enable admin actions.',
         'links': '[]',
         'name': 'AdminActions',
         'updated': '2012-08-25T00:00:00+00:00'},
        {'alias': 'os-volume-transfer',
         'description': 'Volume transfer management support.',
         'links': '[]',
         'name': 'VolumeTransfer',
         'updated': '2013-05-29T00:00:00+00:00'},
    ]
    extensions = [
        cinder_list_extensions.ListExtResource(
            cinder_list_extensions.ListExtManager(None), ext)
        for ext in extensions
    ]
    TEST.cinder_extensions.add(*extensions)
    # Quota Sets
    quota_data = dict(volumes='1',
                      snapshots='1',
                      gigabytes='1000')
    quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
    TEST.cinder_quotas.add(api.base.QuotaSet(quota))
    # Quota Usages
    quota_usage_data = {'gigabytes': {'used': 0,
                                      'quota': 1000},
                        'volumes': {'used': 0,
                                    'quota': 10},
                        'snapshots': {'used': 0,
                                      'quota': 10}}
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(api.base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])
    TEST.cinder_quota_usages.add(quota_usage)
    # Availability Zones
    # Cinder returns the following structure from os-availability-zone
    # {"availabilityZoneInfo":
    # [{"zoneState": {"available": true}, "zoneName": "nova"}]}
    # Note that the default zone is still "nova" even though this is cinder
    TEST.cinder_availability_zones.add(
        availability_zones.AvailabilityZone(
            availability_zones.AvailabilityZoneManager(None),
            {
                'zoneName': 'nova',
                'zoneState': {'available': True}
            }
        )
    )
    # Cinder Limits
    limits = {
        "absolute": {
            "totalVolumesUsed": 4,
            "totalGigabytesUsed": 400,
            'totalSnapshotsUsed': 3,
            "maxTotalVolumes": 20,
            "maxTotalVolumeGigabytes": 1000,
            'maxTotalSnapshots': 10,
        }
    }
    TEST.cinder_limits = limits
    # QOS Specs
    qos_spec1 = qos_specs.QoSSpecs(
        qos_specs.QoSSpecsManager(None),
        {"id": "418db45d-6992-4674-b226-80aacad2073c",
         "name": "high_iops",
         "consumer": "back-end",
         "specs": {"minIOPS": "1000", "maxIOPS": '100000'}})
    qos_spec2 = qos_specs.QoSSpecs(
        qos_specs.QoSSpecsManager(None),
        {"id": "6ed7035f-992e-4075-8ed6-6eff19b3192d",
         "name": "high_bws",
         "consumer": "back-end",
         "specs": {"maxBWS": '5000'}})
    TEST.cinder_qos_specs.add(qos_spec1, qos_spec2)
    vol_type1.associated_qos_spec = qos_spec1.name
    TEST.cinder_qos_spec_associations.add(vol_type1)
    # volume_transfers
    transfer_1 = volume_transfers.VolumeTransfer(
        volume_transfers.VolumeTransferManager(None), {
            'id': '99999999-8888-7777-6666-555555555555',
            'name': 'test transfer',
            'volume_id': volume.id,
            'auth_key': 'blah',
            'created_at': ''})
    TEST.cinder_volume_transfers.add(transfer_1)
    # Pools
    pool1 = pools.Pool(
        pools.PoolManager(None), {
            "QoS_support": False,
            "allocated_capacity_gb": 0,
            "driver_version": "3.0.0",
            "free_capacity_gb": 10,
            "extra_specs": {
                "description": "LVM Extra specs",
                "display_name": "LVMDriver",
                "namespace": "OS::Cinder::LVMDriver",
                "type": "object",
            },
            "name": "devstack@lvmdriver-1#lvmdriver-1",
            "pool_name": "lvmdriver-1",
            "reserved_percentage": 0,
            "storage_protocol": "iSCSI",
            "total_capacity_gb": 10,
            "vendor_name": "Open Source",
            "volume_backend_name": "lvmdriver-1"})
    pool2 = pools.Pool(
        pools.PoolManager(None), {
            "QoS_support": False,
            "allocated_capacity_gb": 2,
            "driver_version": "3.0.0",
            "free_capacity_gb": 5,
            "extra_specs": {
                "description": "LVM Extra specs",
                "display_name": "LVMDriver",
                "namespace": "OS::Cinder::LVMDriver",
                "type": "object",
            },
            "name": "devstack@lvmdriver-2#lvmdriver-2",
            "pool_name": "lvmdriver-2",
            "reserved_percentage": 0,
            "storage_protocol": "iSCSI",
            "total_capacity_gb": 10,
            "vendor_name": "Open Source",
            "volume_backend_name": "lvmdriver-2"})
    TEST.cinder_pools.add(pool1)
    TEST.cinder_pools.add(pool2)
    # volume consistency groups
    cgroup_1 = consistencygroups.Consistencygroup(
        consistencygroups.ConsistencygroupManager(None),
        {'id': u'1',
         'name': u'cg_1',
         'description': 'cg 1 description',
         'volume_types': ['1'],
         'volume_type_names': []})
    cgroup_2 = consistencygroups.Consistencygroup(
        consistencygroups.ConsistencygroupManager(None),
        {'id': u'2',
         'name': u'cg_2',
         'description': 'cg 2 description',
         'volume_types': ['1'],
         'volume_type_names': []})
    TEST.cinder_consistencygroups.add(cgroup_1)
    TEST.cinder_consistencygroups.add(cgroup_2)
    volume_for_consistency_group = volumes.Volume(
        volumes.VolumeManager(None),
        {'id': "11023e92-8008-4c8b-8059-7f2293ff3881",
         'status': 'available',
         'size': 40,
         'name': 'Volume name',
         'display_description': 'Volume description',
         'created_at': '2014-01-27 10:30:00',
         'volume_type': 'vol_type_1',
         'attachments': [],
         'consistencygroup_id': u'1'})
    TEST.cinder_cgroup_volumes.add(api.cinder.Volume(
        volume_for_consistency_group))
    # volume consistency group snapshots
    cg_snapshot_1 = cgsnapshots.Cgsnapshot(
        cgsnapshots.CgsnapshotManager(None),
        {'id': u'1',
         'name': u'cg_ss_1',
         'description': 'cg_ss 1 description',
         'consistencygroup_id': u'1'})
    TEST.cinder_cg_snapshots.add(cg_snapshot_1)
    group_type_1 = group_types.GroupType(
        group_types.GroupTypeManager(None),
        {
            "is_public": True,
            "group_specs": {},
            "id": "4645cbf7-8aa6-4d42-a5f7-24e6ebe5ba79",
            "name": "group-type-1",
            "description": None,
        })
    TEST.cinder_group_types.add(group_type_1)
    group_1 = groups.Group(
        groups.GroupManager(None),
        {
            "availability_zone": "nova",
            "created_at": "2018-01-09T07:27:22.000000",
            "description": "description for group1",
            "group_snapshot_id": None,
            "group_type": group_type_1.id,
            "id": "f64646ac-9bf7-483f-bd85-96c34050a528",
            "name": "group1",
            "replication_status": "disabled",
            "source_group_id": None,
            "status": "available",
            "volume_types": [
                vol_type1.id,
            ]
        })
    TEST.cinder_groups.add(cinder_api.Group(group_1))
    group_snapshot_1 = group_snapshots.GroupSnapshot(
        group_snapshots.GroupSnapshotManager(None),
        {
            "created_at": "2018-01-09T07:46:03.000000",
            "description": "",
            "group_id": group_1.id,
            "group_type_id": group_type_1.id,
            "id": "1036d913-9cb8-46a1-9f56-2f99dc1f14ed",
            "name": "group-snap1",
            "status": "available",
        })
    TEST.cinder_group_snapshots.add(group_snapshot_1)
    group_volume_1 = volumes.Volume(
        volumes.VolumeManager(None),
        {'id': "fe9a2664-0f49-4354-bab6-11b2ad352630",
         'status': 'available',
         'size': 2,
         'name': 'group1-volume1',
         'display_description': 'Volume 1 in Group 1',
         'created_at': '2014-01-27 10:30:00',
         'volume_type': 'vol_type_1',
         'group_id': group_1.id,
         'attachments': []})
    group_volume_2 = volumes.Volume(
        volumes.VolumeManager(None),
        {'id': "a7fb0402-88dc-45a3-970c-d732da63466e",
         'status': 'available',
         'size': 1,
         'name': 'group1-volume2',
         'display_description': 'Volume 2 in Group 1',
         'created_at': '2014-01-30 10:31:00',
         'volume_type': 'vol_type_1',
         'group_id': group_1.id,
         'attachments': []})
    TEST.cinder_group_volumes.add(group_volume_1)
    TEST.cinder_group_volumes.add(group_volume_2)
    snapshot5 = vol_snaps.Snapshot(
        vol_snaps.SnapshotManager(None),
        {'id': 'cd6be1eb-82ca-4587-8036-13c37c00c2b1',
         'name': '',
         'description': 'v2 volume snapshot with metadata description',
         'size': 80,
         'status': 'available',
         'volume_id': '7e4efa56-9ca1-45ff-b83c-2efb2383930d',
         'metadata': {'snapshot_meta_key': 'snapshot_meta_value'},
         'group_snapshot_id': group_snapshot_1.id})
    TEST.cinder_volume_snapshots_with_groups.add(
        api.cinder.VolumeSnapshot(snapshot5))
	"""Slack platform for notify component."""
import asyncio
import logging
import os
from urllib.parse import urlparse
from aiohttp import BasicAuth, FormData
from aiohttp.client_exceptions import ClientError
from slack import WebClient
from slack.errors import SlackApiError
import voluptuous as vol
from homeassistant.components.notify import (
    ATTR_DATA,
    ATTR_TARGET,
    ATTR_TITLE,
    PLATFORM_SCHEMA,
    BaseNotificationService,
)
from homeassistant.const import CONF_API_KEY, CONF_ICON, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client, config_validation as cv
import homeassistant.helpers.template as template
_LOGGER = logging.getLogger(__name__)
ATTR_BLOCKS = "blocks"
ATTR_BLOCKS_TEMPLATE = "blocks_template"
ATTR_FILE = "file"
ATTR_ICON = "icon"
ATTR_PASSWORD = "password"
ATTR_PATH = "path"
ATTR_URL = "url"
ATTR_USERNAME = "username"
CONF_DEFAULT_CHANNEL = "default_channel"
DEFAULT_TIMEOUT_SECONDS = 15
FILE_PATH_SCHEMA = vol.Schema({vol.Required(ATTR_PATH): cv.isfile})
FILE_URL_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_URL): cv.url,
        vol.Inclusive(ATTR_USERNAME, "credentials"): cv.string,
        vol.Inclusive(ATTR_PASSWORD, "credentials"): cv.string,
    }
)
DATA_FILE_SCHEMA = vol.Schema(
    {vol.Required(ATTR_FILE): vol.Any(FILE_PATH_SCHEMA, FILE_URL_SCHEMA)}
)
DATA_TEXT_ONLY_SCHEMA = vol.Schema(
    {
        vol.Optional(ATTR_USERNAME): cv.string,
        vol.Optional(ATTR_ICON): cv.string,
        vol.Optional(ATTR_BLOCKS): list,
        vol.Optional(ATTR_BLOCKS_TEMPLATE): list,
    }
)
DATA_SCHEMA = vol.All(
    cv.ensure_list, [vol.Any(DATA_FILE_SCHEMA, DATA_TEXT_ONLY_SCHEMA)]
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Required(CONF_DEFAULT_CHANNEL): cv.string,
        vol.Optional(CONF_ICON): cv.string,
        vol.Optional(CONF_USERNAME): cv.string,
    }
)
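# For reference, a configuration.yaml entry consistent with the schema above
# could look like the following (token, channel and cosmetics are placeholders):
#
#   notify:
#     - platform: slack
#       api_key: !secret slack_api_key
#       default_channel: "#general"
#       username: "Home Assistant"   # optional
#       icon: ":robot_face:"         # optional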
async def async_get_service(hass, config, discovery_info=None):
    """Set up the Slack notification service."""
    session = aiohttp_client.async_get_clientsession(hass)
    client = WebClient(token=config[CONF_API_KEY], run_async=True, session=session)
    try:
        await client.auth_test()
    except SlackApiError as err:
        _LOGGER.error("Error while setting up integration: %s", err)
        return
    return SlackNotificationService(
        hass,
        client,
        config[CONF_DEFAULT_CHANNEL],
        username=config.get(CONF_USERNAME),
        icon=config.get(CONF_ICON),
    )
@callback
def _async_get_filename_from_url(url):
    """Return the filename of a passed URL."""
    parsed_url = urlparse(url)
    return os.path.basename(parsed_url.path)
@callback
def _async_sanitize_channel_names(channel_list):
    """Remove any # symbols from a channel list."""
    return [channel.lstrip("#") for channel in channel_list]
@callback
def _async_templatize_blocks(hass, value):
    """Recursive template creator helper function."""
    if isinstance(value, list):
        return [_async_templatize_blocks(hass, item) for item in value]
    if isinstance(value, dict):
        return {
            key: _async_templatize_blocks(hass, item) for key, item in value.items()
        }
    tmpl = template.Template(value, hass=hass)
    return tmpl.async_render(parse_result=False)
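# Example: given a blocks_template such as
#   [{"type": "section", "text": {"type": "mrkdwn", "text": "{{ states('sensor.temp') }}"}}]
# the helper walks the nested lists and dicts and renders each string leaf as a
# Home Assistant template, returning the same structure with rendered strings.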
class SlackNotificationService(BaseNotificationService):
    """Define the Slack notification logic."""
    def __init__(self, hass, client, default_channel, username, icon):
        """Initialize."""
        self._client = client
        self._default_channel = default_channel
        self._hass = hass
        self._icon = icon
        self._username = username
    async def _async_send_local_file_message(self, path, targets, message, title):
        """Upload a local file (with message) to Slack."""
        if not self._hass.config.is_allowed_path(path):
            _LOGGER.error("Path does not exist or is not allowed: %s", path)
            return
        parsed_url = urlparse(path)
        filename = os.path.basename(parsed_url.path)
        try:
            await self._client.files_upload(
                channels=",".join(targets),
                file=path,
                filename=filename,
                initial_comment=message,
                title=title or filename,
            )
        except SlackApiError as err:
            _LOGGER.error("Error while uploading file-based message: %s", err)
    async def _async_send_remote_file_message(
        self, url, targets, message, title, *, username=None, password=None
    ):
        """Upload a remote file (with message) to Slack.
        Note that we bypass the python-slackclient WebClient and use aiohttp directly,
        as the former would require us to download the entire remote file into memory
        first before uploading it to Slack.
        """
        if not self._hass.config.is_allowed_external_url(url):
            _LOGGER.error("URL is not allowed: %s", url)
            return
        filename = _async_get_filename_from_url(url)
        session = aiohttp_client.async_get_clientsession(self.hass)
        kwargs = {}
        if username and password is not None:
            kwargs = {"auth": BasicAuth(username, password=password)}
        resp = await session.request("get", url, **kwargs)
        try:
            resp.raise_for_status()
        except ClientError as err:
            _LOGGER.error("Error while retrieving %s: %s", url, err)
            return
        data = FormData(
            {
                "channels": ",".join(targets),
                "filename": filename,
                "initial_comment": message,
                "title": title or filename,
                "token": self._client.token,
            },
            charset="utf-8",
        )
        data.add_field("file", resp.content, filename=filename)
        try:
            await session.post("https://slack.com/api/files.upload", data=data)
        except ClientError as err:
            _LOGGER.error("Error while uploading file message: %s", err)
    async def _async_send_text_only_message(
        self,
        targets,
        message,
        title,
        *,
        username=None,
        icon=None,
        blocks=None,
    ):
        """Send a text-only message."""
        message_dict = {"link_names": True, "text": message}
        if username:
            message_dict["username"] = username
        if icon:
            if icon.lower().startswith(("http://", "https://")):
                icon_type = "url"
            else:
                icon_type = "emoji"
            message_dict[f"icon_{icon_type}"] = icon
        if blocks:
            message_dict["blocks"] = blocks
        tasks = {
            target: self._client.chat_postMessage(**message_dict, channel=target)
            for target in targets
        }
        results = await asyncio.gather(*tasks.values(), return_exceptions=True)
        for target, result in zip(tasks, results):
            if isinstance(result, SlackApiError):
                _LOGGER.error(
                    "There was a Slack API error while sending to %s: %s",
                    target,
                    result,
                )
    async def async_send_message(self, message, **kwargs):
        """Send a message to Slack."""
        data = kwargs.get(ATTR_DATA)
        if data is None:
            data = {}
        try:
            DATA_SCHEMA(data)
        except vol.Invalid as err:
            _LOGGER.error("Invalid message data: %s", err)
            data = {}
        title = kwargs.get(ATTR_TITLE)
        targets = _async_sanitize_channel_names(
            kwargs.get(ATTR_TARGET, [self._default_channel])
        )
        # Message Type 1: A text-only message
        if ATTR_FILE not in data:
            if ATTR_BLOCKS_TEMPLATE in data:
                blocks = _async_templatize_blocks(self.hass, data[ATTR_BLOCKS_TEMPLATE])
            elif ATTR_BLOCKS in data:
                blocks = data[ATTR_BLOCKS]
            else:
                blocks = None
            return await self._async_send_text_only_message(
                targets,
                message,
                title,
                username=data.get(ATTR_USERNAME, self._username),
                icon=data.get(ATTR_ICON, self._icon),
                blocks=blocks,
            )
        # Message Type 2: A message that uploads a remote file
        if ATTR_URL in data[ATTR_FILE]:
            return await self._async_send_remote_file_message(
                data[ATTR_FILE][ATTR_URL],
                targets,
                message,
                title,
                username=data[ATTR_FILE].get(ATTR_USERNAME),
                password=data[ATTR_FILE].get(ATTR_PASSWORD),
            )
        # Message Type 3: A message that uploads a local file
        return await self._async_send_local_file_message(
            data[ATTR_FILE][ATTR_PATH], targets, message, title
        )
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.plugins import directory
from oslo_utils import timeutils
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import utils
from neutron.db.agentschedulers_db import cfg
from neutron.db.models import agent as agent_model
from neutron.tests import base
class TestDhcpAgentNotifyAPI(base.BaseTestCase):
    def setUp(self):
        super(TestDhcpAgentNotifyAPI, self).setUp()
        self.notifier = (
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI(plugin=mock.Mock()))
        mock_util_p = mock.patch.object(utils, 'is_extension_supported')
        mock_log_p = mock.patch.object(dhcp_rpc_agent_api, 'LOG')
        mock_fanout_p = mock.patch.object(self.notifier, '_fanout_message')
        mock_cast_p = mock.patch.object(self.notifier, '_cast_message')
        self.mock_util = mock_util_p.start()
        self.mock_log = mock_log_p.start()
        self.mock_fanout = mock_fanout_p.start()
        self.mock_cast = mock_cast_p.start()
    def _test__schedule_network(self, network,
                                new_agents=None, existing_agents=None,
                                expected_casts=0, expected_warnings=0):
        self.notifier.plugin.schedule_network.return_value = new_agents
        agents = self.notifier._schedule_network(
            mock.ANY, network, existing_agents)
        if new_agents is None:
            new_agents = []
        self.assertEqual(new_agents + existing_agents, agents)
        self.assertEqual(expected_casts, self.mock_cast.call_count)
        self.assertEqual(expected_warnings, self.mock_log.warning.call_count)
    def test__schedule_network(self):
        agent = agent_model.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        network = {'id': 'foo_net_id'}
        self._test__schedule_network(network,
                                     new_agents=[agent], existing_agents=[],
                                     expected_casts=1, expected_warnings=0)
    def test__schedule_network_no_existing_agents(self):
        agent = agent_model.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        network = {'id': 'foo_net_id'}
        self._test__schedule_network(network,
                                     new_agents=None, existing_agents=[agent],
                                     expected_casts=0, expected_warnings=0)
    def test__schedule_network_no_new_agents(self):
        network = {'id': 'foo_net_id'}
        self._test__schedule_network(network,
                                     new_agents=None, existing_agents=[],
                                     expected_casts=0, expected_warnings=1)
    def _test__get_enabled_agents(self, network,
                                  agents=None, port_count=0,
                                  expected_warnings=0, expected_errors=0):
        self.notifier.plugin.get_ports_count.return_value = port_count
        enabled_agents = self.notifier._get_enabled_agents(
            mock.ANY, network, agents, mock.ANY, mock.ANY)
        if not cfg.CONF.enable_services_on_agents_with_admin_state_down:
            agents = [x for x in agents if x.admin_state_up]
        self.assertEqual(agents, enabled_agents)
        self.assertEqual(expected_warnings, self.mock_log.warning.call_count)
        self.assertEqual(expected_errors, self.mock_log.error.call_count)
    def test__get_enabled_agents(self):
        agent1 = agent_model.Agent()
        agent1.admin_state_up = True
        agent1.heartbeat_timestamp = timeutils.utcnow()
        agent2 = agent_model.Agent()
        agent2.admin_state_up = False
        agent2.heartbeat_timestamp = timeutils.utcnow()
        network = {'id': 'foo_network_id'}
        self._test__get_enabled_agents(network, agents=[agent1])
    def test__get_enabled_agents_with_inactive_ones(self):
        agent1 = agent_model.Agent()
        agent1.admin_state_up = True
        agent1.heartbeat_timestamp = timeutils.utcnow()
        agent2 = agent_model.Agent()
        agent2.admin_state_up = True
        # This is effectively an inactive agent
        agent2.heartbeat_timestamp = datetime.datetime(2000, 1, 1, 0, 0)
        network = {'id': 'foo_network_id'}
        self._test__get_enabled_agents(network,
                                       agents=[agent1, agent2],
                                       expected_warnings=1, expected_errors=0)
    def test__get_enabled_agents_with_notification_required(self):
        network = {'id': 'foo_network_id', 'subnets': ['foo_subnet_id']}
        agent = agent_model.Agent()
        agent.admin_state_up = False
        agent.heartbeat_timestamp = timeutils.utcnow()
        self._test__get_enabled_agents(network, [agent], port_count=20,
                                       expected_warnings=0, expected_errors=1)
    def test__get_enabled_agents_with_admin_state_down(self):
        cfg.CONF.set_override(
            'enable_services_on_agents_with_admin_state_down', True)
        agent1 = agent_model.Agent()
        agent1.admin_state_up = True
        agent1.heartbeat_timestamp = timeutils.utcnow()
        agent2 = agent_model.Agent()
        agent2.admin_state_up = False
        agent2.heartbeat_timestamp = timeutils.utcnow()
        network = {'id': 'foo_network_id'}
        self._test__get_enabled_agents(network, agents=[agent1, agent2])
    def test__notify_agents_fanout_required(self):
        self.notifier._notify_agents(mock.ANY,
                                     'network_delete_end',
                                     mock.ANY, 'foo_network_id')
        self.assertEqual(1, self.mock_fanout.call_count)
    def _test__notify_agents_with_function(
        self, function, expected_scheduling=0, expected_casts=0):
        with mock.patch.object(self.notifier, '_schedule_network') as f:
            with mock.patch.object(self.notifier, '_get_enabled_agents') as g:
                agent = agent_model.Agent()
                agent.admin_state_up = True
                agent.heartbeat_timestamp = timeutils.utcnow()
                g.return_value = [agent]
                function()
                self.assertEqual(expected_scheduling, f.call_count)
                self.assertEqual(expected_casts, self.mock_cast.call_count)
    def _test__notify_agents(self, method,
                             expected_scheduling=0, expected_casts=0,
                             payload=None):
        payload = payload or {'port': {}}
        self._test__notify_agents_with_function(
            lambda: self.notifier._notify_agents(
                mock.Mock(), method, payload, 'foo_network_id'),
            expected_scheduling, expected_casts)
    def test__notify_agents_cast_required_with_scheduling(self):
        self._test__notify_agents('port_create_end',
                                  expected_scheduling=1, expected_casts=1)
    def test__notify_agents_cast_required_wo_scheduling_on_port_update(self):
        self._test__notify_agents('port_update_end',
                                  expected_scheduling=0, expected_casts=1)
    def test__notify_agents_cast_required_with_scheduling_subnet_create(self):
        self._test__notify_agents('subnet_create_end',
                                  expected_scheduling=1, expected_casts=1,
                                  payload={'subnet': {}})
    def test__notify_agents_cast_required_with_scheduling_segment(self):
        network_id = 'foo_network_id'
        segment_id = 'foo_segment_id'
        subnet = {'subnet': {'segment_id': segment_id}}
        segment = {'id': segment_id, 'network_id': network_id,
                   'hosts': ['host-a']}
        self.notifier.plugin.get_network.return_value = {'id': network_id}
        segment_sp = mock.Mock()
        segment_sp.get_segment.return_value = segment
        directory.add_plugin('segments', segment_sp)
        self._test__notify_agents('subnet_create_end',
                                  expected_scheduling=1, expected_casts=1,
                                  payload=subnet)
        get_agents = self.notifier.plugin.get_dhcp_agents_hosting_networks
        get_agents.assert_called_once_with(
            mock.ANY, [network_id], hosts=segment['hosts'])
    def test__notify_agents_no_action(self):
        self._test__notify_agents('network_create_end',
                                  expected_scheduling=0, expected_casts=0)
    def test__notify_agents_with_router_interface_add(self):
        self._test__notify_agents_with_function(
            lambda: self.notifier._after_router_interface_created(
                mock.ANY, mock.ANY, mock.ANY, context=mock.Mock(),
                port={'id': 'foo_port_id', 'network_id': 'foo_network_id'}),
            expected_scheduling=1, expected_casts=1)
    def test__notify_agents_with_router_interface_delete(self):
        self._test__notify_agents_with_function(
            lambda: self.notifier._after_router_interface_deleted(
                mock.ANY, mock.ANY, mock.ANY, context=mock.Mock(),
                port={'id': 'foo_port_id', 'network_id': 'foo_network_id'}),
            expected_scheduling=0, expected_casts=1)
    def test__fanout_message(self):
        self.notifier._fanout_message(mock.ANY, mock.ANY, mock.ANY)
        self.assertEqual(1, self.mock_fanout.call_count)
    def test__cast_message(self):
        self.notifier._cast_message(mock.ANY, mock.ANY, mock.ANY)
        self.assertEqual(1, self.mock_cast.call_count)
    def test__native_notification_unsubscribes(self):
        self.assertFalse(self.notifier._unsubscribed_resources)
        for res in (resources.PORT, resources.NETWORK, resources.SUBNET):
            self.notifier._unsubscribed_resources = []
            kwargs = {res: {}}
            registry.notify(res, events.AFTER_CREATE, self,
                            context=mock.Mock(), **kwargs)
            # don't unsubscribe until all three types are observed
            self.assertEqual([], self.notifier._unsubscribed_resources)
            registry.notify(res, events.AFTER_UPDATE, self,
                            context=mock.Mock(), **kwargs)
            self.assertEqual([], self.notifier._unsubscribed_resources)
            registry.notify(res, events.AFTER_DELETE, self,
                            context=mock.Mock(), **kwargs)
            self.assertEqual([res], self.notifier._unsubscribed_resources)
            # after first time, no further unsubscribing should happen
            registry.notify(res, events.AFTER_CREATE, self,
                            context=mock.Mock(), **kwargs)
            self.assertEqual([res], self.notifier._unsubscribed_resources)
    def test__only_status_changed(self):
        p1 = {'id': 1, 'status': 'DOWN', 'updated_at': '10:00:00',
              'revision_number': 1}
        p2 = dict(p1)
        p2['status'] = 'ACTIVE'
        p2['revision_number'] = 2
        p2['updated_at'] = '10:00:01'
        self.assertTrue(self.notifier._only_status_changed(p1, p2))
        p2['name'] = 'test'
        self.assertFalse(self.notifier._only_status_changed(p1, p2))
        p1['name'] = 'test'
        self.assertTrue(self.notifier._only_status_changed(p1, p2))
        p1['name'] = 'test1'
        self.assertFalse(self.notifier._only_status_changed(p1, p2))
__all__ = [
  'SgAssetCreatedHandler',
  'SgHumanUserCreatedHandler',
  'SgPlaylistCreatedHandler',
  'SgProjectCreatedHandler',
  'SgProjectAssetCreatedHandler',
  'SgProjectPlaylistCreatedHandler',
  'SgProjectPublishedFileCreatedHandler',
  'SgProjectSequenceCreatedHandler',
  'SgProjectShotCreatedHandler',
  'SgProjectTaskCreatedHandler',
  'SgProjectVersionCreatedHandler',
  'SgPublishedFileCreatedHandler',
  'SgSequenceCreatedHandler',
  'SgShotCreatedHandler',
  'SgTaskCreatedHandler',
  'SgVersionCreatedHandler'
]
# Python imports
from abc import abstractmethod
# This module imports
import ShotgunORM
########################################################################
#
# Global Entity created handlers
#
########################################################################
class SgHumanUserCreatedHandler(ShotgunORM.SgEventHandler):
  '''
  '''
  def __init__(self):
    super(SgHumanUserCreatedHandler, self).__init__()
    self.addFilter(ShotgunORM.SgHumanUserCreatedFilter())
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
class SgAssetCreatedHandler(ShotgunORM.SgEventHandler):
  '''
  '''
  def __init__(self):
    super(SgAssetCreatedHandler, self).__init__()
    self.addFilter(ShotgunORM.SgAssetCreatedFilter())
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
class SgPlaylistCreatedHandler(ShotgunORM.SgEventHandler):
  '''
  '''
  def __init__(self):
    super(SgPlaylistCreatedHandler, self).__init__()
    self.addFilter(ShotgunORM.SgPlaylistCreatedFilter())
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
class SgProjectCreatedHandler(ShotgunORM.SgEventHandler):
  '''
  '''
  def __init__(self):
    super(SgProjectCreatedHandler, self).__init__()
    self.addFilter(ShotgunORM.SgProjectCreatedFilter())
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
class SgPublishedFileCreatedHandler(ShotgunORM.SgEventHandler):
  '''
  '''
  def __init__(self):
    super(SgPublishedFileCreatedHandler, self).__init__()
    self.addFilter(ShotgunORM.SgPublishedFileCreatedFilter())
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
class SgSequenceCreatedHandler(ShotgunORM.SgEventHandler):
  '''
  '''
  def __init__(self):
    super(SgSequenceCreatedHandler, self).__init__()
    self.addFilter(ShotgunORM.SgSequenceCreatedFilter())
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
class SgShotCreatedHandler(ShotgunORM.SgEventHandler):
  '''
  '''
  def __init__(self):
    super(SgShotCreatedHandler, self).__init__()
    self.addFilter(ShotgunORM.SgShotCreatedFilter())
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
class SgVersionCreatedHandler(ShotgunORM.SgEventHandler):
  '''
  '''
  def __init__(self):
    super(SgVersionCreatedHandler, self).__init__()
    self.addFilter(ShotgunORM.SgVersionCreatedFilter())
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
class SgTaskCreatedHandler(ShotgunORM.SgEventHandler):
  '''
  '''
  def __init__(self):
    super(SgTaskCreatedHandler, self).__init__()
    self.addFilter(ShotgunORM.SgTaskCreatedFilter())
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
########################################################################
#
# Project specific Entity created handlers
#
########################################################################
class SgProjectAssetCreatedHandler(SgAssetCreatedHandler):
  '''
  '''
  def __init__(self, project):
    super(SgProjectAssetCreatedHandler, self).__init__()
    self._project = project
    self.addFilter(ShotgunORM.SgProjectFilter(project))
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
  def project(self):
    '''
    '''
    return self._project
class SgProjectPlaylistCreatedHandler(SgPlaylistCreatedHandler):
  '''
  '''
  def __init__(self, project):
    super(SgProjectPlaylistCreatedHandler, self).__init__()
    self._project = project
    self.addFilter(ShotgunORM.SgProjectFilter(project))
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
  def project(self):
    '''
    '''
    return self._project
class SgProjectPublishedFileCreatedHandler(SgPublishedFileCreatedHandler):
  '''
  '''
  def __init__(self, project):
    super(SgProjectPublishedFileCreatedHandler, self).__init__()
    self._project = project
    self.addFilter(ShotgunORM.SgProjectFilter(project))
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
  def project(self):
    '''
    '''
    return self._project
class SgProjectSequenceCreatedHandler(SgSequenceCreatedHandler):
  '''
  '''
  def __init__(self, project):
    super(SgProjectSequenceCreatedHandler, self).__init__()
    self._project = project
    self.addFilter(ShotgunORM.SgProjectFilter(project))
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
  def project(self):
    '''
    '''
    return self._project
class SgProjectShotCreatedHandler(SgShotCreatedHandler):
  '''
  '''
  def __init__(self, project):
    super(SgProjectShotCreatedHandler, self).__init__()
    self._project = project
    self.addFilter(ShotgunORM.SgProjectFilter(project))
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
  def project(self):
    '''
    '''
    return self._project
class SgProjectTaskCreatedHandler(SgTaskCreatedHandler):
  '''
  '''
  def __init__(self, project):
    super(SgProjectTaskCreatedHandler, self).__init__()
    self._project = project
    self.addFilter(ShotgunORM.SgProjectFilter(project))
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
  def project(self):
    '''
    '''
    return self._project
class SgProjectVersionCreatedHandler(SgVersionCreatedHandler):
  '''
  '''
  def __init__(self, project):
    super(SgProjectVersionCreatedHandler, self).__init__()
    self._project = project
    self.addFilter(ShotgunORM.SgProjectFilter(project))
  @abstractmethod
  def processEvent(self, sgEvent):
    '''
    '''
    raise NotImplementedError()
  def project(self):
    '''
    '''
    return self._project
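########################################################################
#
# Usage sketch (illustrative only, not part of this module)
#
########################################################################
# A concrete handler is written by subclassing one of the handlers above
# and implementing processEvent().  The class and print call below are
# hypothetical examples, not real ShotgunORM classes.
#
# class MyProjectShotCreatedHandler(SgProjectShotCreatedHandler):
#   def processEvent(self, sgEvent):
#     print('new shot created in project %s: %s' % (self.project(), sgEvent))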
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
import random
import socket
import time
from unittest2 import SkipTest
from xml.dom import minidom
import six
from six.moves import http_client
from six.moves import urllib
from swiftclient import get_auth
from swift.common import constraints
from swift.common.utils import config_true_value
from test import safe_repr
http_client._MAXHEADERS = constraints.MAX_HEADER_COUNT
class AuthenticationFailed(Exception):
    pass
class RequestError(Exception):
    pass
class ResponseError(Exception):
    def __init__(self, response, method=None, path=None):
        self.status = response.status
        self.reason = response.reason
        self.method = method
        self.path = path
        self.headers = response.getheaders()
        for name, value in self.headers:
            if name.lower() == 'x-trans-id':
                self.txid = value
                break
        else:
            self.txid = None
        super(ResponseError, self).__init__()
    def __str__(self):
        return repr(self)
    def __repr__(self):
        return '%d: %r (%r %r) txid=%s' % (
            self.status, self.reason, self.method, self.path, self.txid)
def listing_empty(method):
    for i in range(6):
        if len(method()) == 0:
            return True
        time.sleep(2 ** i)
    return False
def listing_items(method):
    marker = None
    once = True
    items = []
    while once or items:
        for i in items:
            yield i
        if once or marker:
            if marker:
                items = method(parms={'marker': marker})
            else:
                items = method()
            if len(items) == 10000:
                marker = items[-1]
            else:
                marker = None
            once = False
        else:
            items = []
class Connection(object):
    def __init__(self, config):
        for key in 'auth_host auth_port auth_ssl username password'.split():
            if key not in config:
                raise SkipTest(
                    "Missing required configuration parameter: %s" % key)
        self.auth_host = config['auth_host']
        self.auth_port = int(config['auth_port'])
        self.auth_ssl = config['auth_ssl'] in ('on', 'true', 'yes', '1')
        self.insecure = config_true_value(config.get('insecure', 'false'))
        self.auth_prefix = config.get('auth_prefix', '/')
        self.auth_version = str(config.get('auth_version', '1'))
        self.account = config.get('account')
        self.username = config['username']
        self.password = config['password']
        self.storage_host = None
        self.storage_port = None
        self.storage_url = None
        self.conn_class = None
    def get_account(self):
        return Account(self, self.account)
    def authenticate(self, clone_conn=None):
        if clone_conn:
            self.conn_class = clone_conn.conn_class
            self.storage_host = clone_conn.storage_host
            self.storage_url = clone_conn.storage_url
            self.storage_port = clone_conn.storage_port
            self.storage_token = clone_conn.storage_token
            return
        if self.auth_version == "1":
            auth_path = '%sv1.0' % (self.auth_prefix)
            if self.account:
                auth_user = '%s:%s' % (self.account, self.username)
            else:
                auth_user = self.username
        else:
            auth_user = self.username
            auth_path = self.auth_prefix
        auth_scheme = 'https://' if self.auth_ssl else 'http://'
        auth_netloc = "%s:%d" % (self.auth_host, self.auth_port)
        auth_url = auth_scheme + auth_netloc + auth_path
        authargs = dict(snet=False, tenant_name=self.account,
                        auth_version=self.auth_version, os_options={},
                        insecure=self.insecure)
        (storage_url, storage_token) = get_auth(
            auth_url, auth_user, self.password, **authargs)
        if not (storage_url and storage_token):
            raise AuthenticationFailed()
        x = storage_url.split('/')
        if x[0] == 'http:':
            self.conn_class = http_client.HTTPConnection
            self.storage_port = 80
        elif x[0] == 'https:':
            self.conn_class = http_client.HTTPSConnection
            self.storage_port = 443
        else:
            raise ValueError('unexpected protocol %s' % (x[0]))
        self.storage_host = x[2].split(':')[0]
        if ':' in x[2]:
            self.storage_port = int(x[2].split(':')[1])
        # Make sure storage_url is a str and not unicode, since
        # keystoneclient (called by swiftclient) returns it as unicode
        # and that causes trouble when doing a no_safe_quote query.
        self.storage_url = str('/%s/%s' % (x[3], x[4]))
        self.account_name = str(x[4])
        self.auth_user = auth_user
        # With v2 keystone, storage_token is unicode.  We want it to be
        # a str, otherwise queries whose headers already contain encoded
        # non-ASCII characters would run into trouble.
        self.storage_token = str(storage_token)
        self.user_acl = '%s:%s' % (self.account, self.username)
        self.http_connect()
        return self.storage_url, self.storage_token
    def cluster_info(self):
        """
        Retrieve the data in /info, or {} on 404
        """
        status = self.make_request('GET', '/info',
                                   cfg={'absolute_path': True})
        if status // 100 == 4:
            return {}
        if not 200 <= status <= 299:
            raise ResponseError(self.response, 'GET', '/info')
        return json.loads(self.response.read())
    def http_connect(self):
        self.connection = self.conn_class(self.storage_host,
                                          port=self.storage_port)
        # self.connection.set_debuglevel(3)
    def make_path(self, path=None, cfg=None):
        if path is None:
            path = []
        if cfg is None:
            cfg = {}
        if cfg.get('version_only_path'):
            return '/' + self.storage_url.split('/')[1]
        if path:
            quote = urllib.parse.quote
            if cfg.get('no_quote') or cfg.get('no_path_quote'):
                quote = lambda x: x
            return '%s/%s' % (self.storage_url,
                              '/'.join([quote(i) for i in path]))
        else:
            return self.storage_url
    def make_headers(self, hdrs, cfg=None):
        if cfg is None:
            cfg = {}
        headers = {}
        if not cfg.get('no_auth_token'):
            headers['X-Auth-Token'] = self.storage_token
        if cfg.get('use_token'):
            headers['X-Auth-Token'] = cfg.get('use_token')
        if isinstance(hdrs, dict):
            headers.update(hdrs)
        return headers
    def make_request(self, method, path=None, data='', hdrs=None, parms=None,
                     cfg=None):
        if path is None:
            path = []
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        if not cfg.get('absolute_path'):
            # Set absolute_path=True to make a request to exactly the given
            # path, not storage path + given path. Useful for
            # non-account/container/object requests.
            path = self.make_path(path, cfg=cfg)
        headers = self.make_headers(hdrs, cfg=cfg)
        if isinstance(parms, dict) and parms:
            quote = urllib.parse.quote
            if cfg.get('no_quote') or cfg.get('no_parms_quote'):
                quote = lambda x: x
            query_args = ['%s=%s' % (quote(x), quote(str(y)))
                          for (x, y) in parms.items()]
            path = '%s?%s' % (path, '&'.join(query_args))
        if not cfg.get('no_content_length'):
            if cfg.get('set_content_length'):
                headers['Content-Length'] = cfg.get('set_content_length')
            else:
                headers['Content-Length'] = len(data)
        def try_request():
            self.http_connect()
            self.connection.request(method, path, data, headers)
            return self.connection.getresponse()
        self.response = None
        try_count = 0
        fail_messages = []
        while try_count < 5:
            try_count += 1
            try:
                self.response = try_request()
            except http_client.HTTPException as e:
                fail_messages.append(safe_repr(e))
                continue
            if self.response.status == 401:
                fail_messages.append("Response 401")
                self.authenticate()
                continue
            elif self.response.status == 503:
                fail_messages.append("Response 503")
                if try_count != 5:
                    time.sleep(5)
                continue
            break
        if self.response:
            return self.response.status
        request = "{method} {path} headers: {headers} data: {data}".format(
            method=method, path=path, headers=headers, data=data)
        raise RequestError('Unable to complete http request: %s. '
                           'Attempts: %s, Failures: %s' %
                           (request, len(fail_messages), fail_messages))
    def put_start(self, path, hdrs=None, parms=None, cfg=None, chunked=False):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        self.http_connect()
        path = self.make_path(path, cfg)
        headers = self.make_headers(hdrs, cfg=cfg)
        if chunked:
            headers['Transfer-Encoding'] = 'chunked'
            headers.pop('Content-Length', None)
        if isinstance(parms, dict) and parms:
            quote = urllib.parse.quote
            if cfg.get('no_quote') or cfg.get('no_parms_quote'):
                quote = lambda x: x
            query_args = ['%s=%s' % (quote(x), quote(str(y)))
                          for (x, y) in parms.items()]
            path = '%s?%s' % (path, '&'.join(query_args))
        self.connection = self.conn_class(self.storage_host,
                                          port=self.storage_port)
        # self.connection.set_debuglevel(3)
        self.connection.putrequest('PUT', path)
        for key, value in headers.items():
            self.connection.putheader(key, value)
        self.connection.endheaders()
    def put_data(self, data, chunked=False):
        if chunked:
            self.connection.send('%x\r\n%s\r\n' % (len(data), data))
        else:
            self.connection.send(data)
    def put_end(self, chunked=False):
        if chunked:
            self.connection.send('0\r\n\r\n')
        self.response = self.connection.getresponse()
        self.connection.close()
        return self.response.status
class Base(object):
    def __str__(self):
        return self.name
    def header_fields(self, required_fields, optional_fields=None):
        if optional_fields is None:
            optional_fields = ()
        def is_int_header(header):
            if header.startswith('x-account-storage-policy-') and \
                    header.endswith(('-bytes-used', '-object-count')):
                return True
            return header in (
                'content-length',
                'x-account-container-count',
                'x-account-object-count',
                'x-account-bytes-used',
                'x-container-object-count',
                'x-container-bytes-used',
            )
        headers = dict(self.conn.response.getheaders())
        ret = {}
        for return_key, header in required_fields:
            if header not in headers:
                raise ValueError("%s was not found in response header" %
                                 (header,))
            if is_int_header(header):
                ret[return_key] = int(headers[header])
            else:
                ret[return_key] = headers[header]
        for return_key, header in optional_fields:
            if header not in headers:
                continue
            if is_int_header(header):
                ret[return_key] = int(headers[header])
            else:
                ret[return_key] = headers[header]
        return ret
class Account(Base):
    def __init__(self, conn, name):
        self.conn = conn
        self.name = str(name)
    def update_metadata(self, metadata=None, cfg=None):
        if metadata is None:
            metadata = {}
        if cfg is None:
            cfg = {}
        headers = dict(("X-Account-Meta-%s" % k, v)
                       for k, v in metadata.items())
        self.conn.make_request('POST', self.path, hdrs=headers, cfg=cfg)
        if not 200 <= self.conn.response.status <= 299:
            raise ResponseError(self.conn.response, 'POST',
                                self.conn.make_path(self.path))
        return True
    def container(self, container_name):
        return Container(self.conn, self.name, container_name)
    def containers(self, hdrs=None, parms=None, cfg=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        format_type = parms.get('format', None)
        if format_type not in [None, 'json', 'xml']:
            raise RequestError('Invalid format: %s' % format_type)
        if format_type is None and 'format' in parms:
            del parms['format']
        status = self.conn.make_request('GET', self.path, hdrs=hdrs,
                                        parms=parms, cfg=cfg)
        if status == 200:
            if format_type == 'json':
                conts = json.loads(self.conn.response.read())
                for cont in conts:
                    cont['name'] = cont['name'].encode('utf-8')
                return conts
            elif format_type == 'xml':
                conts = []
                tree = minidom.parseString(self.conn.response.read())
                for x in tree.getElementsByTagName('container'):
                    cont = {}
                    for key in ['name', 'count', 'bytes', 'last_modified']:
                        cont[key] = x.getElementsByTagName(key)[0].\
                            childNodes[0].nodeValue
                    conts.append(cont)
                for cont in conts:
                    cont['name'] = cont['name'].encode('utf-8')
                return conts
            else:
                lines = self.conn.response.read().split('\n')
                if lines and not lines[-1]:
                    lines = lines[:-1]
                return lines
        elif status == 204:
            return []
        raise ResponseError(self.conn.response, 'GET',
                            self.conn.make_path(self.path))
    def delete_containers(self):
        for c in listing_items(self.containers):
            cont = self.container(c)
            cont.update_metadata(hdrs={'x-versions-location': ''})
            if not cont.delete_recursive():
                return False
        return listing_empty(self.containers)
    def info(self, hdrs=None, parms=None, cfg=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        if self.conn.make_request('HEAD', self.path, hdrs=hdrs,
                                  parms=parms, cfg=cfg) != 204:
            raise ResponseError(self.conn.response, 'HEAD',
                                self.conn.make_path(self.path))
        fields = [['object_count', 'x-account-object-count'],
                  ['container_count', 'x-account-container-count'],
                  ['bytes_used', 'x-account-bytes-used']]
        optional_fields = [
            ['temp-url-key', 'x-account-meta-temp-url-key'],
            ['temp-url-key-2', 'x-account-meta-temp-url-key-2']]
        return self.header_fields(fields, optional_fields=optional_fields)
    @property
    def path(self):
        return []
class Container(Base):
    # policy_specified is set in __init__.py when tests are being set up.
    policy_specified = None
    def __init__(self, conn, account, name):
        self.conn = conn
        self.account = str(account)
        self.name = str(name)
    def create(self, hdrs=None, parms=None, cfg=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        if self.policy_specified and 'X-Storage-Policy' not in hdrs:
            hdrs['X-Storage-Policy'] = self.policy_specified
        return self.conn.make_request('PUT', self.path, hdrs=hdrs,
                                      parms=parms, cfg=cfg) in (201, 202)
    def update_metadata(self, hdrs=None, cfg=None):
        if hdrs is None:
            hdrs = {}
        if cfg is None:
            cfg = {}
        self.conn.make_request('POST', self.path, hdrs=hdrs, cfg=cfg)
        if not 200 <= self.conn.response.status <= 299:
            raise ResponseError(self.conn.response, 'POST',
                                self.conn.make_path(self.path))
        return True
    def delete(self, hdrs=None, parms=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        return self.conn.make_request('DELETE', self.path, hdrs=hdrs,
                                      parms=parms) == 204
    def delete_files(self):
        for f in listing_items(self.files):
            file_item = self.file(f)
            if not file_item.delete():
                return False
        return listing_empty(self.files)
    def delete_recursive(self):
        return self.delete_files() and self.delete()
    def file(self, file_name):
        return File(self.conn, self.account, self.name, file_name)
    def files(self, hdrs=None, parms=None, cfg=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        format_type = parms.get('format', None)
        if format_type not in [None, 'json', 'xml']:
            raise RequestError('Invalid format: %s' % format_type)
        if format_type is None and 'format' in parms:
            del parms['format']
        status = self.conn.make_request('GET', self.path, hdrs=hdrs,
                                        parms=parms, cfg=cfg)
        if status == 200:
            if format_type == 'json':
                files = json.loads(self.conn.response.read())
                for file_item in files:
                    for key in ('name', 'subdir', 'content_type'):
                        if key in file_item:
                            file_item[key] = file_item[key].encode('utf-8')
                return files
            elif format_type == 'xml':
                files = []
                tree = minidom.parseString(self.conn.response.read())
                container = tree.getElementsByTagName('container')[0]
                for x in container.childNodes:
                    file_item = {}
                    if x.tagName == 'object':
                        for key in ['name', 'hash', 'bytes', 'content_type',
                                    'last_modified']:
                            file_item[key] = x.getElementsByTagName(key)[0].\
                                childNodes[0].nodeValue
                    elif x.tagName == 'subdir':
                        file_item['subdir'] = x.getElementsByTagName(
                            'name')[0].childNodes[0].nodeValue
                    else:
                        raise ValueError('Found unexpected element %s'
                                         % x.tagName)
                    files.append(file_item)
                for file_item in files:
                    if 'subdir' in file_item:
                        file_item['subdir'] = file_item['subdir'].\
                            encode('utf-8')
                    else:
                        file_item['name'] = file_item['name'].encode('utf-8')
                        file_item['content_type'] = file_item['content_type'].\
                            encode('utf-8')
                        file_item['bytes'] = int(file_item['bytes'])
                return files
            else:
                content = self.conn.response.read()
                if content:
                    lines = content.split('\n')
                    if lines and not lines[-1]:
                        lines = lines[:-1]
                    return lines
                else:
                    return []
        elif status == 204:
            return []
        raise ResponseError(self.conn.response, 'GET',
                            self.conn.make_path(self.path))
    def info(self, hdrs=None, parms=None, cfg=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        self.conn.make_request('HEAD', self.path, hdrs=hdrs,
                               parms=parms, cfg=cfg)
        if self.conn.response.status == 204:
            required_fields = [['bytes_used', 'x-container-bytes-used'],
                               ['object_count', 'x-container-object-count'],
                               ['last_modified', 'last-modified']]
            optional_fields = [
                # N.B. Swift never returns both x-versions-location and
                # x-history-location in the same response, so it is safe to
                # map both headers to the same "versions" key; its presence
                # means versioning is enabled.
                ['versions', 'x-versions-location'],
                ['versions', 'x-history-location'],
                ['tempurl_key', 'x-container-meta-temp-url-key'],
                ['tempurl_key2', 'x-container-meta-temp-url-key-2']]
            return self.header_fields(required_fields, optional_fields)
        raise ResponseError(self.conn.response, 'HEAD',
                            self.conn.make_path(self.path))
    @property
    def path(self):
        return [self.name]
class File(Base):
    def __init__(self, conn, account, container, name):
        self.conn = conn
        self.account = str(account)
        self.container = str(container)
        self.name = str(name)
        self.chunked_write_in_progress = False
        self.content_type = None
        self.content_range = None
        self.size = None
        self.metadata = {}
    def make_headers(self, cfg=None):
        if cfg is None:
            cfg = {}
        headers = {}
        if not cfg.get('no_content_length'):
            if cfg.get('set_content_length'):
                headers['Content-Length'] = cfg.get('set_content_length')
            elif self.size:
                headers['Content-Length'] = self.size
            else:
                headers['Content-Length'] = 0
        if cfg.get('use_token'):
            headers['X-Auth-Token'] = cfg.get('use_token')
        if cfg.get('no_content_type'):
            pass
        elif self.content_type:
            headers['Content-Type'] = self.content_type
        else:
            headers['Content-Type'] = 'application/octet-stream'
        for key in self.metadata:
            headers['X-Object-Meta-' + key] = self.metadata[key]
        return headers
    @classmethod
    def compute_md5sum(cls, data):
        block_size = 4096
        if isinstance(data, str):
            data = six.StringIO(data)
        checksum = hashlib.md5()
        buff = data.read(block_size)
        while buff:
            checksum.update(buff)
            buff = data.read(block_size)
        data.seek(0)
        return checksum.hexdigest()
    def copy(self, dest_cont, dest_file, hdrs=None, parms=None, cfg=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        if 'destination' in cfg:
            headers = {'Destination': cfg['destination']}
        elif cfg.get('no_destination'):
            headers = {}
        else:
            headers = {'Destination': '%s/%s' % (dest_cont, dest_file)}
        headers.update(hdrs)
        if 'Destination' in headers:
            headers['Destination'] = urllib.parse.quote(headers['Destination'])
        if self.conn.make_request('COPY', self.path, hdrs=headers,
                                  cfg=cfg, parms=parms) != 201:
            raise ResponseError(self.conn.response, 'COPY',
                                self.conn.make_path(self.path))
        return True
    def copy_account(self, dest_account, dest_cont, dest_file,
                     hdrs=None, parms=None, cfg=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        if 'destination' in cfg:
            headers = {'Destination': cfg['destination']}
        elif cfg.get('no_destination'):
            headers = {}
        else:
            headers = {'Destination-Account': dest_account,
                       'Destination': '%s/%s' % (dest_cont, dest_file)}
        headers.update(hdrs)
        if 'Destination-Account' in headers:
            headers['Destination-Account'] = \
                urllib.parse.quote(headers['Destination-Account'])
        if 'Destination' in headers:
            headers['Destination'] = urllib.parse.quote(headers['Destination'])
        if self.conn.make_request('COPY', self.path, hdrs=headers,
                                  cfg=cfg, parms=parms) != 201:
            raise ResponseError(self.conn.response, 'COPY',
                                self.conn.make_path(self.path))
        return True
    def delete(self, hdrs=None, parms=None, cfg=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if self.conn.make_request('DELETE', self.path, hdrs=hdrs,
                                  cfg=cfg, parms=parms) != 204:
            raise ResponseError(self.conn.response, 'DELETE',
                                self.conn.make_path(self.path))
        return True
    def info(self, hdrs=None, parms=None, cfg=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        if self.conn.make_request('HEAD', self.path, hdrs=hdrs,
                                  parms=parms, cfg=cfg) != 200:
            raise ResponseError(self.conn.response, 'HEAD',
                                self.conn.make_path(self.path))
        fields = [['content_length', 'content-length'],
                  ['content_type', 'content-type'],
                  ['last_modified', 'last-modified'],
                  ['etag', 'etag']]
        optional_fields = [['x_object_manifest', 'x-object-manifest']]
        header_fields = self.header_fields(fields,
                                           optional_fields=optional_fields)
        header_fields['etag'] = header_fields['etag'].strip('"')
        return header_fields
    def initialize(self, hdrs=None, parms=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if not self.name:
            return False
        status = self.conn.make_request('HEAD', self.path, hdrs=hdrs,
                                        parms=parms)
        if status == 404:
            return False
        elif (status < 200) or (status > 299):
            raise ResponseError(self.conn.response, 'HEAD',
                                self.conn.make_path(self.path))
        for hdr in self.conn.response.getheaders():
            if hdr[0].lower() == 'content-type':
                self.content_type = hdr[1]
            if hdr[0].lower().startswith('x-object-meta-'):
                self.metadata[hdr[0][14:]] = hdr[1]
            if hdr[0].lower() == 'etag':
                self.etag = hdr[1].strip('"')
            if hdr[0].lower() == 'content-length':
                self.size = int(hdr[1])
            if hdr[0].lower() == 'last-modified':
                self.last_modified = hdr[1]
        return True
    def load_from_filename(self, filename, callback=None):
        fobj = open(filename, 'rb')
        self.write(fobj, callback=callback)
        fobj.close()
    @property
    def path(self):
        return [self.container, self.name]
    @classmethod
    def random_data(cls, size=None):
        if size is None:
            size = random.randint(1, 32768)
        fd = open('/dev/urandom', 'r')
        data = fd.read(size)
        fd.close()
        return data
    def read(self, size=-1, offset=0, hdrs=None, buffer=None,
             callback=None, cfg=None, parms=None):
        if cfg is None:
            cfg = {}
        if parms is None:
            parms = {}
        if size > 0:
            range_string = 'bytes=%d-%d' % (offset, (offset + size) - 1)
            if hdrs:
                hdrs['Range'] = range_string
            else:
                hdrs = {'Range': range_string}
        status = self.conn.make_request('GET', self.path, hdrs=hdrs,
                                        cfg=cfg, parms=parms)
        if (status < 200) or (status > 299):
            raise ResponseError(self.conn.response, 'GET',
                                self.conn.make_path(self.path))
        for hdr in self.conn.response.getheaders():
            if hdr[0].lower() == 'content-type':
                self.content_type = hdr[1]
            if hdr[0].lower() == 'content-range':
                self.content_range = hdr[1]
        if hasattr(buffer, 'write'):
            scratch = self.conn.response.read(8192)
            transferred = 0
            while len(scratch) > 0:
                buffer.write(scratch)
                transferred += len(scratch)
                if callable(callback):
                    callback(transferred, self.size)
                scratch = self.conn.response.read(8192)
            return None
        else:
            return self.conn.response.read()
    def read_md5(self):
        status = self.conn.make_request('GET', self.path)
        if (status < 200) or (status > 299):
            raise ResponseError(self.conn.response, 'GET',
                                self.conn.make_path(self.path))
        checksum = hashlib.md5()
        scratch = self.conn.response.read(8192)
        while len(scratch) > 0:
            checksum.update(scratch)
            scratch = self.conn.response.read(8192)
        return checksum.hexdigest()
    def save_to_filename(self, filename, callback=None):
        # Open the file before the try block so that a failed open() does
        # not cause a NameError in the finally clause.
        fobj = open(filename, 'wb')
        try:
            self.read(buffer=fobj, callback=callback)
        finally:
            fobj.close()
    def sync_metadata(self, metadata=None, cfg=None, parms=None):
        if cfg is None:
            cfg = {}
        self.metadata = self.metadata if metadata is None else metadata
        if self.metadata:
            headers = self.make_headers(cfg=cfg)
            if not cfg.get('no_content_length'):
                if cfg.get('set_content_length'):
                    headers['Content-Length'] = \
                        cfg.get('set_content_length')
                else:
                    headers['Content-Length'] = 0
            self.conn.make_request('POST', self.path, hdrs=headers,
                                   parms=parms, cfg=cfg)
            if self.conn.response.status not in (201, 202):
                raise ResponseError(self.conn.response, 'POST',
                                    self.conn.make_path(self.path))
        return True
    def chunked_write(self, data=None, hdrs=None, parms=None, cfg=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        if data is not None and self.chunked_write_in_progress:
            self.conn.put_data(data, True)
        elif data is not None:
            self.chunked_write_in_progress = True
            headers = self.make_headers(cfg=cfg)
            headers.update(hdrs)
            self.conn.put_start(self.path, hdrs=headers, parms=parms,
                                cfg=cfg, chunked=True)
            self.conn.put_data(data, True)
        elif self.chunked_write_in_progress:
            self.chunked_write_in_progress = False
            return self.conn.put_end(True) == 201
        else:
            raise RuntimeError
    def write(self, data='', hdrs=None, parms=None, callback=None, cfg=None,
              return_resp=False):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        block_size = 2 ** 20
        if isinstance(data, file):
            try:
                data.flush()
                data.seek(0)
            except IOError:
                pass
            self.size = int(os.fstat(data.fileno())[6])
        else:
            data = six.StringIO(data)
            self.size = data.len
        headers = self.make_headers(cfg=cfg)
        headers.update(hdrs)
        self.conn.put_start(self.path, hdrs=headers, parms=parms, cfg=cfg)
        transferred = 0
        buff = data.read(block_size)
        buff_len = len(buff)
        try:
            while buff_len > 0:
                self.conn.put_data(buff)
                transferred += buff_len
                if callable(callback):
                    callback(transferred, self.size)
                buff = data.read(block_size)
                buff_len = len(buff)
            self.conn.put_end()
        except socket.timeout as err:
            raise err
        if (self.conn.response.status < 200) or \
           (self.conn.response.status > 299):
            raise ResponseError(self.conn.response, 'PUT',
                                self.conn.make_path(self.path))
        try:
            data.seek(0)
        except IOError:
            pass
        self.md5 = self.compute_md5sum(data)
        if return_resp:
            return self.conn.response
        return True
    def write_random(self, size=None, hdrs=None, parms=None, cfg=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        data = self.random_data(size)
        if not self.write(data, hdrs=hdrs, parms=parms, cfg=cfg):
            raise ResponseError(self.conn.response, 'PUT',
                                self.conn.make_path(self.path))
        self.md5 = self.compute_md5sum(six.StringIO(data))
        return data
    def write_random_return_resp(self, size=None, hdrs=None, parms=None,
                                 cfg=None):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        data = self.random_data(size)
        resp = self.write(data, hdrs=hdrs, parms=parms, cfg=cfg,
                          return_resp=True)
        if not resp:
            raise ResponseError(self.conn.response)
        self.md5 = self.compute_md5sum(six.StringIO(data))
        return resp
    def post(self, hdrs=None, parms=None, cfg=None, return_resp=False):
        if hdrs is None:
            hdrs = {}
        if parms is None:
            parms = {}
        if cfg is None:
            cfg = {}
        headers = self.make_headers(cfg=cfg)
        headers.update(hdrs)
        self.conn.make_request('POST', self.path, hdrs=headers,
                               parms=parms, cfg=cfg)
        if self.conn.response.status not in (201, 202):
            raise ResponseError(self.conn.response, 'POST',
                                self.conn.make_path(self.path))
        if return_resp:
            return self.conn.response
        return True
from __future__ import print_function
import functools
import linecache
import numpy
import os
import sys
import warnings
from types import NoneType
# FIXME from ... import wrap
### printing
def strc(arg,n=10):
    """Compact version of `str`."""
    if isinstance(arg,float):
        return "%.3g"%arg
    if type(arg)==list:
        return "[%s|%d]"%(",".join([strc(x) for x in arg[:3]]),len(arg))
    if type(arg)==numpy.ndarray:
        return "<ndarray-%x %s %s [%s,%s]>"%(id(arg),arg.shape,str(arg.dtype),numpy.amin(arg),numpy.amax(arg))
    return str(arg).replace("\n"," ")
### deprecation warnings
def deprecated(f):
    """Prints a deprecation warning when called."""
    @functools.wraps(f)
    def wrapper(*args,**kw):
        warnings.warn_explicit("calling deprecated function %s"%f.__name__,
                               category=DeprecationWarning,
                               filename=f.func_code.co_filename,
                               lineno=f.func_code.co_firstlineno+1)
        return f(*args,**kw)
    return wrapper
def failfunc(f):
    @functools.wraps(f)
    def wrapper(*args,**kw):
        raise Exception("don't call %s anymore"%f)
    return wrapper
obsolete = failfunc
### debugging / tracing
_trace1_depth = 0
def trace1(f):
    """Print arguments/return values for the decorated function before each call."""
    name = f.func_name
    argnames = f.func_code.co_varnames[:f.func_code.co_argcount]
    @functools.wraps(f)
    def wrapper(*args,**kw):
        try:
            global _trace1_depth
            _trace1_depth += 1
            print(" " * _trace1_depth, "ENTER", name, ":", end=' ')
            for k,v in zip(argnames,args)+kw.items():
                print("%s=%s" % (k, strc(v)), end=' ')
            print()
            result = f(*args,**kw)
            print(" " * _trace1_depth, "LEAVE", name, ":", strc(result))
            return result
        except Exception as e:
            print(" " * _trace1_depth, "ERROR", name, ":", e)
            raise
        finally:
            _trace1_depth -= 1
    return wrapper
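# Illustrative sketch of trace1 (the decorated function below is hypothetical):
#
# @trace1
# def add(x, y):
#     return x + y
#
# add(1, 2)   # prints an "ENTER add : x=1 y=2" line and a "LEAVE add : 3" line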
def tracing(f):
    """Enable tracing just within a function call."""
    def globaltrace(frame,why,arg):
        if why == "call": return localtrace
        return None
    def localtrace(frame, why, arg):
        if why == "line":
            fname = frame.f_code.co_filename
            lineno = frame.f_lineno
            base = os.path.basename(fname)
            print("%s(%s): %s" % (base, lineno,
                                  linecache.getline(fname, lineno)))
        return localtrace
    @functools.wraps(f)
    def wrapper(*args,**kw):
        sys.settrace(globaltrace)
        result = f(*args,**kw)
        sys.settrace(None)
        return result
    return wrapper
def method(cls):
    """Adds the function as a method to the given class."""
    import new
    def _wrap(f):
        cls.__dict__[f.func_name] = new.instancemethod(f,None,cls)
        return None
    return _wrap
def unchanged(f):
    "This decorator doesn't add any behavior"
    return f
def disabled(value=None):
    """Disables the function so that it does nothing.  Optionally
    returns the given value."""
    def wrapper(f):
        @functools.wraps(f)
        def g(*args,**kw):
            return value
        return g
    return wrapper
def replacedby(g):
    """Replace the function with another function."""
    def wrapper(f):
        @functools.wraps(f)
        def wrapped(*args,**kw):
            return g(*args,**kw)
        return wrapped
    return wrapper
### type and range checks for arguments and return values
class CheckError(Exception):
    def __init__(self,*args,**kw):
        self.fun = kw.get("fun","?")
        self.var = kw.get("var","?")
        self.description = " ".join([strc(x) for x in args])
    def __str__(self):
        result = "\nCheckError for argument "
        result += str(self.var)
        result += " of function "
        result += str(self.fun)
        result += "\n"
        result += self.description
        return result
class CheckWarning(CheckError):
    def __init__(self,*args,**kw):
        self.fun = kw.get("fun","?")
        self.var = kw.get("var","?")
        self.description = " ".join([strc(x) for x in args])
    def __str__(self):
        result = "\nCheckWarning for argument "
        result += str(self.var)
        result += " of function "
        result += str(self.fun)
        result += "\n"
        result += self.description
        result += "(This can happen occasionally during normal operations and isn't necessarily a bug or problem.)\n"
        return result
def checktype(value,type_):
    """Check value against the type spec.  If everything
    is OK, this just returns the value itself.
    If the types don't check out, an exception is thrown."""
    # True skips any check
    if type_ is True:
        return value
    # types are checked using isinstance
    if type(type_)==type:
        if not isinstance(value,type_):
            raise CheckError("isinstance failed",value,"of type",type(value),"is not of type",type_)
        return value
    # for a list, check that all elements of a collection have a type
    # of some list element, allowing declarations like [str] or [str,unicode]
    # no recursive checks right now
    if type(type_)==list:
        if not numpy.iterable(value):
            raise CheckError("expected iterable",value)
        for x in value:
            if not reduce(max,[isinstance(x,t) for t in type_]):
                raise CheckError("element",x,"of type",type(x),"fails to be of type",type_)
        return value
    # for sets, check membership of the type in the set
    if type(type_)==set:
        for t in type_:
            if isinstance(value,t): return value
        raise CheckError("set membership failed",value,type_,var=var) # FIXME var?
    # for tuples, check that all conditions are satisfied
    if type(type_)==tuple:
        for t in type_:
            checktype(value,t)
        return value
    # callables are just called and should either use assertions or
    # explicitly raise CheckError
    if callable(type_):
        type_(value)
        return value
    # otherwise, we don't understand the type spec
    raise Exception("unknown type spec: %s"%type_)
def checks(*types,**ktypes):
    """Check argument and return types against type specs at runtime."""
    def argument_check_decorator(f):
        @functools.wraps(f)
        def argument_checks(*args,**kw):
            # print("@@@", f, "decl", types, ktypes, "call",
            #       [strc(x) for x in args], kw)
            name = f.func_name
            argnames = f.func_code.co_varnames[:f.func_code.co_argcount]
            kw3 = [(var,value,ktypes.get(var,True)) for var,value in kw.items()]
            for var,value,type_ in zip(argnames,args,types)+kw3:
                try:
                    checktype(value,type_)
                except AssertionError as e:
                    raise CheckError(e.message,*e.args,var=var,fun=f)
                except CheckError as e:
                    e.fun = f
                    e.var = var
                    raise e
                except:
                    print("unknown exception while checking function:", name)
                    raise
            result = f(*args,**kw)
            checktype(result,ktypes.get("_",True))
            return result
        return argument_checks
    return argument_check_decorator
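# Illustrative sketch of the checks decorator (the function below is
# hypothetical; NUMBER is defined further below).  Positional specs match
# positional arguments and the "_" keyword spec is applied to the return value.
#
# @checks(int, [str], _=NUMBER)
# def average_length(count, names):
#     return sum(len(n) for n in names) / float(count)
#
# average_length(2, ["ab", "cd"])    # passes the checks
# average_length("2", ["ab", "cd"])  # raises CheckError for argument "count"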
def makeargcheck(message,warning=0):
    """Converts a predicate into an argcheck."""
    def decorator(f):
        def wrapper(arg):
            if not f(arg):
                if warning:
                    raise CheckWarning(strc(arg)+" of type "+str(type(arg))+": "+str(message))
                else:
                    raise CheckError(strc(arg)+" of type "+str(type(arg))+": "+str(message))
        return wrapper
    return decorator
### Here are a whole bunch of type check predicates.
def ALL(*checks):
    def CHK_(x):
        for check in checks:
            check(x)
    return CHK_
def ANY(*checks):
    def CHK_(x):
        for check in checks:
            try:
                check(x)
                return
            except:
                pass
        raise CheckError(x,": failed all checks:",[strc(x) for x in checks])
    return CHK_
@makeargcheck("value should be type book or 0/1")
def BOOL(x):
    return isinstance(x,bool) or (isinstance(x,int) and x in [0,1])
@makeargcheck("value should be an int or a float")
def NUMBER(a):
    return isinstance(a,int) or isinstance(a,float)
def RANGE(lo,hi):
    @makeargcheck("value out of range [%g,%g]"%(lo,hi))
    def RANGE_(x):
        return x>=lo and x<=hi
    return RANGE_
def ARANK(n):
    @makeargcheck("array must have rank %d"%n)
    def ARANK_(a):
        if not hasattr(a,"ndim"): return 0
        return a.ndim==n
    return ARANK_
def ARANGE(lo,hi):
    @makeargcheck("array values must be within [%g,%g]"%(lo,hi))
    def ARANGE_(a):
        return numpy.amin(a)>=lo and numpy.amax(a)<=hi
    return ARANGE_
@makeargcheck("array elements must be non-negative")
def ANONNEG(a):
    return numpy.amin(a)>=0
float_dtypes = [numpy.dtype('float32'),numpy.dtype('float64')]
try: float_dtypes += [numpy.dtype('float96')]
except: pass
try: float_dtypes += [numpy.dtype('float128')]
except: pass
@makeargcheck("array must contain floating point values")
def AFLOAT(a):
    return a.dtype in float_dtypes
int_dtypes = [numpy.dtype('uint8'),numpy.dtype('int32'),numpy.dtype('int64'),numpy.dtype('uint32'),numpy.dtype('uint64')]
@makeargcheck("array must contain integer values")
def AINT(a):
    return a.dtype in int_dtypes
@makeargcheck("expected a byte (uint8) array")
def ABYTE(a):
    return a.dtype==numpy.dtype('B')
@makeargcheck("expect tuple of int")
def inttuple(a):
    if isinstance(a,int): return 1
    if not isinstance(a,(tuple,list)): return 0
    for x in a:
        if not isinstance(x,int): return 0
    return 1
@makeargcheck("expect tuple of nonnegative int")
def uinttuple(a):
    if isinstance(a,int): return 1
    if not isinstance(a,(tuple,list)): return 0
    for x in a:
        if not isinstance(x,int): return 0
        if x<0: return 0
    return 1
@makeargcheck("expect pair of int")
def uintpair(a):
    if not tuple(a): return 0
    if not len(a)==2: return 0
    if a[0]<0: return 0
    if a[1]<0: return 0
    return 1
@makeargcheck("expect a rectangle as a pair of slices")
def RECTANGLE(a):
    if not tuple(a): return 0
    if not isinstance(a[0],slice): return 0
    if not isinstance(a[1],slice): return 0
    return 1
### specific kinds of arrays
ARRAY1 = ARANK(1)
ARRAY2 = ARANK(2)
ARRAY3 = ARANK(3)
AINT1 = ALL(ARANK(1),AINT)
AINT2 = ALL(ARANK(2),AINT)
AINT3 = ALL(ARANK(3),AINT)
AFLOAT1 = ALL(ARANK(1),AFLOAT)
AFLOAT2 = ALL(ARANK(2),AFLOAT)
AFLOAT3 = ALL(ARANK(3),AFLOAT)
@makeargcheck("expected a boolean array or an array of 0/1")
def ABINARY(a):
    # a boolean array is binary regardless of rank
    if a.dtype==numpy.dtype(bool): return 1
    if not a.dtype in int_dtypes: return 0
    import scipy.ndimage.measurements
    zeros,ones = scipy.ndimage.measurements.sum(1,a,[0,1])
    if zeros+ones == a.size: return 1
    if a.dtype==numpy.dtype('B'):
        zeros,ones = scipy.ndimage.measurements.sum(1,a,[0,255])
        if zeros+ones == a.size: return 1
    return 0
ABINARY1 = ALL(ABINARY,ARRAY1)
ABINARY2 = ALL(ABINARY,ARRAY2)
ABINARY3 = ALL(ABINARY,ARRAY3)
def CHANNELS(n):
    @makeargcheck("expected %d channels"%n)
    def CHANNELS_(a):
        return a.shape[-1]==n
    return CHANNELS_
GRAYSCALE = AFLOAT2
GRAYSCALE1 = ALL(AFLOAT2,ARANGE(0,1))
BYTEIMAGE = ALL(ARANK(2),ABYTE)
RGB = ALL(ARANK(3),ABYTE,CHANNELS(3))
RGBA = ALL(ARANK(3),ABYTE,CHANNELS(4))
### image arrays with more complicated image properties
@makeargcheck("expect a light image (median>mean)",warning=1)
def LIGHT(a):
    return numpy.median(a)>=numpy.mean(a)
@makeargcheck("expect a dark image (median<mean)",warning=1)
def DARK(a):
    return numpy.median(a)<=numpy.mean(a)
@makeargcheck("expect a page image (larger than 600x600)",warning=1)
def PAGE(a):
    return a.ndim==2 and a.shape[0]>=600 and a.shape[1]>=600
@makeargcheck("expected a line image (taller than 8 pixels and wider than tall)",warning=1)
def LINE(a,var=None):
    return a.ndim==2 and a.shape[0]>8 # and a.shape[1]>a.shape[0]
BINPAGE = ALL(PAGE,ABINARY2)
LIGHTPAGE = ALL(PAGE,LIGHT)
DARKPAGE = ALL(PAGE,DARK)
LIGHTLINE = ALL(LINE,LIGHT)
DARKLINE = ALL(LINE,DARK)
@makeargcheck("expected a small grayscale patch with values between 0 and 1")
def PATCH(a):
    GRAYSCALE1(a)
    return a.shape[0]<=256 and a.shape[1]<=256
### segmentation-related checks
###
### Segmentations come in two flavors: with a white background (for writing to disk
### so that one can see something in file browsers), and with a black background
### (for easy processing). Light segmentations should only exist on disk.
@makeargcheck("expected a segmentation image")
def SEGMENTATION(a):
    return isinstance(a,numpy.ndarray) and a.ndim==2 and a.dtype in ['int32','int64']
@makeargcheck("expected a segmentation with white background")
def WHITESEG(a):
    return numpy.amax(a)==0xffffff
@makeargcheck("expected a segmentation with black background")
def BLACKSEG(a):
    return numpy.amax(a)<0xffffff
@makeargcheck("all non-zero pixels in a page segmentation must have a column value >0")
def PAGEEXTRA(a):
    u = numpy.unique(a)
    u = u[u!=0]
    u = u[(u&0xff0000)==0]
    return len(u)==0
LIGHTSEG = ALL(SEGMENTATION,WHITESEG)
DARKSEG = ALL(SEGMENTATION,BLACKSEG)
PAGESEG = ALL(SEGMENTATION,BLACKSEG,PAGE,PAGEEXTRA)
LINESEG = ALL(SEGMENTATION,BLACKSEG,LINE)
LIGHTPAGESEG = ALL(SEGMENTATION,WHITESEG,PAGE)
LIGHTLINESEG = ALL(SEGMENTATION,WHITESEG,LINE)
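### The two flavors can be converted into one another; a minimal sketch
### (illustrative helper, not part of this module) that relabels the 0xffffff
### background used on disk to the 0 background expected for processing:
def _example_lightseg_to_darkseg(seg):
    """Illustrative conversion from a WHITESEG to a BLACKSEG segmentation."""
    seg = seg.copy()
    seg[seg==0xffffff] = 0
    return seg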
### special types for pattern recognition
def TDATASET(a):
    if type(a[0])!=numpy.ndarray:
        raise CheckError("dataset fails to yield ndarray on subscripting")
def DATASET_SIZE(lo=3,hi=int(1e9)):
    @makeargcheck("data set size should be between %s and %s"%(lo,hi))
    def DSSIZE_(a):
        return len(a)>=lo and len(a)<=hi
    return DSSIZE_
def DATASET_VRANK(n):
    @makeargcheck("data set vectors should have a rank of %d"%n)
    def DSVRANK_(a):
        return n<0 or a[0].ndim==n
    return DSVRANK_
def DATASET_VSIZE(lo,hi):
    @makeargcheck("data vector size should be between %d and %d"%(lo,hi))
    def DSVSIZE_(a):
        return a[0].size>=lo and a[0].size<=hi
    return DSVSIZE_
def DATASET_VRANGE(lo,hi):
    @makeargcheck("data set values should be in the range of %g to %g"%(lo,hi))
    def DSVRANGE_(a):
        # just a quick sanity check
        return numpy.amin(a[0])>=lo and numpy.amax(a[0])<=hi
    return DSVRANGE_
def DATASET(size0=3,size1=int(1e9),vsize0=2,vsize1=100000,vrank=-1,vrange0=-300,vrange1=300,fixedshape=0):
    return ALL(TDATASET,
               DATASET_SIZE(size0,size1),
               DATASET_VRANK(vrank),
               DATASET_VSIZE(vsize0,vsize1),
               DATASET_VRANGE(vrange0,vrange1))
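### A minimal usage sketch of the @checks decorator defined above (the function
### is illustrative and not part of this module): positional specs are matched
### against positional arguments, and the return spec is given via "_".
@checks(AFLOAT2,RANGE(0.0,1.0),_=AINT2)
def _example_binarize(image,threshold):
    """Illustrative thresholding of a grayscale image into a 0/1 int image."""
    return (image>threshold).astype('int32')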
 | |
| 
	import os
import random
import pygame
import zwave.helper
class Player(pygame.sprite.Sprite):
    def __init__(self, game, model = "01"):
        super(Player, self).__init__()
        self.weapon = {}
        self.weapon["type"] = "gun"
        self.weapon["delay"] = 20
        self.weapon["timer"] = 0
        self.weapon["damage"] = [35, 65]
        self.weapon["bullets"] = []
        self.life = 100
        self.total_life = 100
        self.speed = 2
        self.score = 0
        self.kills = {}
        self.kills["zombies"] = 0
        self.kills["headcrabs"] = 0
        ## init values ##
        self.game = game
        self.model = model
        self.size = 65 * game.scale
        self.angle = 0
        self.center = {}
        self.last = {}
        self.generate_position()
        path = os.path.join("assets", "img", "players", self.model, "sprite.png")
        self.image_base = zwave.helper.pygame_image(path, self.size)
        self.image = self.image_base
        self.rect = self.image.get_rect()
        self.rect.x = self.x
        self.rect.y = self.y
        self.set_colliders()
    def generate_position(self):
        ## set position ##
        self.x = self.game.center["x"] - (self.size / 2)
        self.y = self.game.center["y"] - (self.size / 2)
        ## save the current position of the player, relative to the game screen ##
        self.center["x"] = self.game.center["x"]
        self.center["y"] = self.game.center["y"]
    def set_colliders(self):
        ## default collider, with same size of sprite image ##
        self.collider1 = pygame.sprite.GroupSingle(self)
        ## touch/collider2 is a smaller collider that gives the player a tighter hit box, ##
        ## ignoring the mostly transparent edges of the original sprite image ##
        self.touch = pygame.sprite.Sprite()
        self.touch.up = self
        self.touch.size = int(self.size / 2)
        self.touch.image = pygame.surface.Surface((self.touch.size, self.touch.size))
        self.touch.image.fill((255, 0, 0))
        self.touch.image.set_colorkey((255, 0, 0))
        self.touch.rect = self.touch.image.get_rect()
        self.touch.rect.x = self.center["x"] - (self.touch.size / 2)
        self.touch.rect.y = self.center["y"] - (self.touch.size / 2)
        self.collider2 = pygame.sprite.GroupSingle(self.touch)
    def update_colliders(self):
        ## update the position of the player's second (touch) collider ##
        self.touch.rect.x = self.center["x"] - (self.touch.size / 2)
        self.touch.rect.y = self.center["y"] - (self.touch.size / 2)
    def collision(self, collider1, collider2):
        ## check collider 1 ##
        if collider1 == "walls":
            collider1 = self.game.map.collider["walls"]
        elif collider1 == "enemies":
            collider1 = self.game.enemies["colliders"]
        return pygame.sprite.groupcollide(collider2, collider1, False, False)
    def update_angle(self):
        ## update the player angle based on the mouse location ##
        self.angle = zwave.helper.angle_by_two_points(self.center, self.game.mouse)
        self.image = zwave.helper.pygame_rotate(self.image_base, self.angle)
    def update_position(self):
        ## if there was a collision, revert the view to its last saved position ##
        if self.collision("walls", self.collider2) or self.collision("enemies", self.collider2):
            self.game.x = self.game.last["x"]
            self.game.y = self.game.last["y"]
        ## save the current view position for future use ##
        self.game.last["x"] = self.game.x
        self.game.last["y"] = self.game.y
        ## get the currently pressed keys ##
        keys = pygame.key.get_pressed()
        ## footsteps sound if the player is walking ##
        if keys[pygame.K_w] or keys[pygame.K_s] or keys[pygame.K_a] or keys[pygame.K_d]:
            if not self.game.sound["channels"]["steps"].get_busy():
                self.game.sound["channels"]["steps"].play(self.game.sound["steps"], -1)
        else:
            self.game.sound["channels"]["steps"].stop()
        
        ## picks speed for each axis ##
        velocity = zwave.helper.velocity_by_keys(self.speed * self.game.scale, keys)
        ## movement according to keys down ##
        if keys[pygame.K_w]:
            self.game.y -= velocity
        if keys[pygame.K_s]:
            self.game.y += velocity
        if keys[pygame.K_a]:
            self.game.x -= velocity
        if keys[pygame.K_d]:
            self.game.x += velocity
  
    def shot(self):
        ## checks if timer for the shot is zero ##
        if (self.weapon["timer"] == 0) and (self.alive()):
            ## check if the type of weapon is gun ##
            if self.weapon["type"] == "gun":
                angle = zwave.helper.angle_by_two_points(self.center, self.game.mouse)
                bullet = Bullet(angle, self.game)
                self.weapon["bullets"].append(bullet)
                ## gunshot sound ##
                self.game.sound["channels"]["attacks"].play(self.game.sound["gunshot"], 0)
                ## add timer for next gunshot ##
                self.weapon["timer"] = self.weapon["delay"]
    def update_bullets(self):
        ## pick a random damage value within the weapon's damage range ##
        damage = random.randint(self.weapon["damage"][0], self.weapon["damage"][1])
        ## get all bullets instances ##
        for bullet in self.weapon["bullets"]:
            collider = bullet.collider()
            ## check collision with walls ##
            if self.collision("walls", collider):
                bullet.kill()
            
            ## check collision with enemies ##
            elif self.collision("enemies", collider):
                enemy = self.collision("enemies", collider)[bullet][0].up
                enemy.life -= damage
                bullet.kill()
            ## if there was no collision ##
            else:
                bullet.update()
    def draw(self):
        for bullet in self.weapon["bullets"]:
            group = bullet.collider()
            group.draw(self.game.screen)
        self.collider1.draw(self.game.screen)
        self.collider2.draw(self.game.screen)
    def wave_update(self):
        if self.weapon["damage"][0] < 100:
            self.weapon["damage"][0] += 10
            self.weapon["damage"][1] += 20
        if self.weapon["delay"] > 20:
            self.weapon["delay"] -= 3
        if self.total_life < 300:
            self.total_life += 10
        if self.life < self.total_life:
            if (self.total_life - self.life) >= 25:
                self.life += 25
            else:
                self.life += self.total_life - self.life
        if self.speed < 4: 
            self.speed += 0.1
    def kill(self):
        if self.game.sound["channels"]["steps"].get_busy():
            self.game.sound["channels"]["steps"].stop()
        self.touch.kill()
        super(Player, self).kill()
    def update(self):
        if self.life > 0:
            ## update gunshot timer ##
            if self.weapon["timer"] > 0:
                self.weapon["timer"] -= 1
            self.update_bullets()
            self.update_angle()
            self.update_position()
            self.update_colliders()
        elif self.alive():
            self.kill()
class Bullet(pygame.sprite.Sprite):
    def __init__(self, angle, game):
        pygame.sprite.Sprite.__init__(self)
        ## init values ##
        self.angle = angle - 180
        self.size = 10 * game.scale
        path = os.path.join("assets", "img", "bullet.png")
        self.image = zwave.helper.pygame_image(path, self.size)
        self.image = zwave.helper.pygame_rotate(self.image, angle)
        self.rect = self.image.get_rect()
        self.rect.x = game.player.center["x"] - (self.size / 2)
        self.rect.y = game.player.center["y"] - (self.size / 2)
        self.velocity = zwave.helper.velocity_by_angle(35 * game.scale, self.angle)
        self.sgroup = pygame.sprite.GroupSingle(self)
    def update(self):
        self.rect.x -= self.velocity["x"]
        self.rect.y -= self.velocity["y"]
    def collider(self):
        return self.sgroup
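## a rough sketch (assumption only, not the actual zwave.helper code) of what ##
## velocity_by_angle presumably computes: a scalar speed decomposed into ##
## per-axis components for an angle in degrees; Bullet.update() then applies ##
## the result as rect.x -= velocity["x"] and rect.y -= velocity["y"] ##
def _example_velocity_by_angle(speed, angle):
    import math
    radians = math.radians(angle)
    return {"x": speed * math.sin(radians), "y": speed * math.cos(radians)}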
 | |
| 
	# Copyright 2015 Red Hat, Inc.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Tests for the ironic driver."""
from ironicclient import exc as ironic_exception
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import uuidutils
import six
from testtools.matchers import HasLength
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state as nova_states
from nova.compute import task_states
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import utils
from nova.tests.unit.virt.ironic import utils as ironic_utils
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.ironic import client_wrapper as cw
from nova.virt.ironic import driver as ironic_driver
from nova.virt.ironic import ironic_states
CONF = cfg.CONF
IRONIC_FLAGS = dict(
    api_version=1,
    group='ironic',
)
FAKE_CLIENT = ironic_utils.FakeClient()
class FakeClientWrapper(cw.IronicClientWrapper):
    def _get_client(self):
        return FAKE_CLIENT
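# FakeLoopingCall stands in for the object returned by
# loopingcall.FixedIntervalLoopingCall(): the driver under test is expected to
# call .start(interval=CONF.ironic.api_retry_interval) and then .wait() on it
# (see the assertions in _test_spawn below), so both are plain MagicMocks.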
class FakeLoopingCall(object):
    def __init__(self):
        self.wait = mock.MagicMock()
        self.start = mock.MagicMock()
        self.start.return_value = self
def _get_properties():
    return {'cpus': 2,
            'memory_mb': 512,
            'local_gb': 10,
            'cpu_arch': 'x86_64',
            'capabilities': None}
def _get_stats():
    return {'cpu_arch': 'x86_64'}
FAKE_CLIENT_WRAPPER = FakeClientWrapper()
@mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER)
class IronicDriverTestCase(test.NoDBTestCase):
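    # Note: when mock.patch is used as a class decorator it only wraps methods
    # whose names start with patch.TEST_PREFIX ('test'), so setUp gets the
    # same IronicClientWrapper patch explicitly below.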
    @mock.patch.object(cw, 'IronicClientWrapper',
                       lambda *_: FAKE_CLIENT_WRAPPER)
    def setUp(self):
        super(IronicDriverTestCase, self).setUp()
        self.flags(**IRONIC_FLAGS)
        # set client log config to exercise the code that manipulates it
        CONF.set_override('client_log_level', 'DEBUG', group='ironic')
        self.driver = ironic_driver.IronicDriver(None)
        self.driver.virtapi = fake.FakeVirtAPI()
        self.ctx = nova_context.get_admin_context()
        self.instance_uuid = uuidutils.generate_uuid()
        # mock retry configs to avoid sleeps and make tests run quicker
        CONF.set_default('api_max_retries', default=1, group='ironic')
        CONF.set_default('api_retry_interval', default=0, group='ironic')
    def test_public_api_signatures(self):
        self.assertPublicAPISignatures(driver.ComputeDriver(None), self.driver)
    def test_validate_driver_loading(self):
        self.assertIsInstance(self.driver, ironic_driver.IronicDriver)
    def test_driver_capabilities(self):
        self.assertFalse(self.driver.capabilities['has_imagecache'],
                         'Driver capabilities for \'has_imagecache\' '
                         'is invalid')
        self.assertFalse(self.driver.capabilities['supports_recreate'],
                         'Driver capabilities for \'supports_recreate\' '
                         'is invalid')
    def test__get_hypervisor_type(self):
        self.assertEqual('ironic', self.driver._get_hypervisor_type())
    def test__get_hypervisor_version(self):
        self.assertEqual(1, self.driver._get_hypervisor_version())
    @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
    def test__validate_instance_and_node(self, mock_gbiui):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=self.instance_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid)
        ironicclient = cw.IronicClientWrapper()
        mock_gbiui.return_value = node
        result = ironic_driver._validate_instance_and_node(ironicclient,
                                                           instance)
        self.assertEqual(result.uuid, node_uuid)
    @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
    def test__validate_instance_and_node_failed(self, mock_gbiui):
        ironicclient = cw.IronicClientWrapper()
        mock_gbiui.side_effect = ironic_exception.NotFound()
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid)
        self.assertRaises(exception.InstanceNotFound,
                          ironic_driver._validate_instance_and_node,
                          ironicclient, instance)
    @mock.patch.object(objects.Instance, 'refresh')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test__wait_for_active_pass(self, fake_validate, fake_refresh):
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                provision_state=ironic_states.DEPLOYING)
        fake_validate.return_value = node
        self.driver._wait_for_active(FAKE_CLIENT, instance)
        fake_validate.assert_called_once_with(FAKE_CLIENT, instance)
        fake_refresh.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'refresh')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test__wait_for_active_done(self, fake_validate, fake_refresh):
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                provision_state=ironic_states.ACTIVE)
        fake_validate.return_value = node
        self.assertRaises(loopingcall.LoopingCallDone,
                self.driver._wait_for_active,
                FAKE_CLIENT, instance)
        fake_validate.assert_called_once_with(FAKE_CLIENT, instance)
        fake_refresh.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'refresh')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test__wait_for_active_fail(self, fake_validate, fake_refresh):
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                provision_state=ironic_states.DEPLOYFAIL)
        fake_validate.return_value = node
        self.assertRaises(exception.InstanceDeployFailure,
                self.driver._wait_for_active,
                FAKE_CLIENT, instance)
        fake_validate.assert_called_once_with(FAKE_CLIENT, instance)
        fake_refresh.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'refresh')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def _wait_for_active_abort(self, instance_params, fake_validate,
                              fake_refresh):
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid(),
                **instance_params)
        self.assertRaises(exception.InstanceDeployFailure,
                self.driver._wait_for_active,
                FAKE_CLIENT, instance)
        # Assert _validate_instance_and_node wasn't called
        self.assertFalse(fake_validate.called)
        fake_refresh.assert_called_once_with()
    def test__wait_for_active_abort_deleting(self):
        self._wait_for_active_abort({'task_state': task_states.DELETING})
    def test__wait_for_active_abort_deleted(self):
        self._wait_for_active_abort({'vm_state': vm_states.DELETED})
    def test__wait_for_active_abort_error(self):
        self._wait_for_active_abort({'vm_state': vm_states.ERROR})
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test__wait_for_power_state_pass(self, fake_validate):
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                target_power_state=ironic_states.POWER_OFF)
        fake_validate.return_value = node
        self.driver._wait_for_power_state(
                FAKE_CLIENT, instance, 'fake message')
        self.assertTrue(fake_validate.called)
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test__wait_for_power_state_ok(self, fake_validate):
        instance = fake_instance.fake_instance_obj(self.ctx,
                uuid=uuidutils.generate_uuid())
        node = ironic_utils.get_test_node(
                target_power_state=ironic_states.NOSTATE)
        fake_validate.return_value = node
        self.assertRaises(loopingcall.LoopingCallDone,
                self.driver._wait_for_power_state,
                FAKE_CLIENT, instance, 'fake message')
        self.assertTrue(fake_validate.called)
    def test__node_resource(self):
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        stats = _get_stats()
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=self.instance_uuid,
                                          properties=props)
        result = self.driver._node_resource(node)
        wantkeys = ["hypervisor_hostname", "hypervisor_type",
                    "hypervisor_version", "cpu_info",
                    "vcpus", "vcpus_used",
                    "memory_mb", "memory_mb_used",
                    "local_gb", "local_gb_used",
                    "disk_available_least",
                    "supported_instances",
                    "stats",
                    "numa_topology"]
        wantkeys.sort()
        gotkeys = result.keys()
        gotkeys.sort()
        self.assertEqual(wantkeys, gotkeys)
        self.assertEqual(props['cpus'], result['vcpus'])
        self.assertEqual(props['cpus'], result['vcpus_used'])
        self.assertEqual(props['memory_mb'], result['memory_mb'])
        self.assertEqual(props['memory_mb'], result['memory_mb_used'])
        self.assertEqual(props['local_gb'], result['local_gb'])
        self.assertEqual(props['local_gb'], result['local_gb_used'])
        self.assertEqual(node_uuid, result['hypervisor_hostname'])
        self.assertEqual(stats, jsonutils.loads(result['stats']))
        self.assertIsNone(result['numa_topology'])
    def test__node_resource_canonicalizes_arch(self):
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        props['cpu_arch'] = 'i386'
        node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
        result = self.driver._node_resource(node)
        self.assertEqual('i686',
                         jsonutils.loads(result['supported_instances'])[0][0])
        self.assertEqual('i386',
                         jsonutils.loads(result['stats'])['cpu_arch'])
    def test__node_resource_unknown_arch(self):
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        del props['cpu_arch']
        node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
        result = self.driver._node_resource(node)
        self.assertEqual([], jsonutils.loads(result['supported_instances']))
    def test__node_resource_exposes_capabilities(self):
        props = _get_properties()
        props['capabilities'] = 'test:capability'
        node = ironic_utils.get_test_node(properties=props)
        result = self.driver._node_resource(node)
        stats = jsonutils.loads(result['stats'])
        self.assertIsNone(stats.get('capabilities'))
        self.assertEqual('capability', stats.get('test'))
    def test__node_resource_no_capabilities(self):
        props = _get_properties()
        props['capabilities'] = None
        node = ironic_utils.get_test_node(properties=props)
        result = self.driver._node_resource(node)
        self.assertIsNone(jsonutils.loads(result['stats']).get('capabilities'))
    def test__node_resource_malformed_capabilities(self):
        props = _get_properties()
        props['capabilities'] = 'test:capability,:no_key,no_val:'
        node = ironic_utils.get_test_node(properties=props)
        result = self.driver._node_resource(node)
        stats = jsonutils.loads(result['stats'])
        self.assertEqual('capability', stats.get('test'))
    def test__node_resource_available(self):
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        stats = _get_stats()
        node = ironic_utils.get_test_node(
            uuid=node_uuid,
            instance_uuid=None,
            power_state=ironic_states.POWER_OFF,
            properties=props,
            provision_state=ironic_states.AVAILABLE)
        result = self.driver._node_resource(node)
        self.assertEqual(props['cpus'], result['vcpus'])
        self.assertEqual(0, result['vcpus_used'])
        self.assertEqual(props['memory_mb'], result['memory_mb'])
        self.assertEqual(0, result['memory_mb_used'])
        self.assertEqual(props['local_gb'], result['local_gb'])
        self.assertEqual(0, result['local_gb_used'])
        self.assertEqual(node_uuid, result['hypervisor_hostname'])
        self.assertEqual(stats, jsonutils.loads(result['stats']))
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_node_resources_unavailable')
    def test__node_resource_unavailable_node_res(self, mock_res_unavail):
        mock_res_unavail.return_value = True
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        stats = _get_stats()
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=None,
                                          properties=props)
        result = self.driver._node_resource(node)
        self.assertEqual(0, result['vcpus'])
        self.assertEqual(0, result['vcpus_used'])
        self.assertEqual(0, result['memory_mb'])
        self.assertEqual(0, result['memory_mb_used'])
        self.assertEqual(0, result['local_gb'])
        self.assertEqual(0, result['local_gb_used'])
        self.assertEqual(node_uuid, result['hypervisor_hostname'])
        self.assertEqual(stats, jsonutils.loads(result['stats']))
    @mock.patch.object(ironic_driver.IronicDriver,
                       '_node_resources_used')
    def test__node_resource_used_node_res(self, mock_res_used):
        mock_res_used.return_value = True
        node_uuid = uuidutils.generate_uuid()
        props = _get_properties()
        stats = _get_stats()
        node = ironic_utils.get_test_node(
            uuid=node_uuid,
            instance_uuid=uuidutils.generate_uuid(),
            provision_state=ironic_states.ACTIVE,
            properties=props)
        result = self.driver._node_resource(node)
        self.assertEqual(props['cpus'], result['vcpus'])
        self.assertEqual(props['cpus'], result['vcpus_used'])
        self.assertEqual(props['memory_mb'], result['memory_mb'])
        self.assertEqual(props['memory_mb'], result['memory_mb_used'])
        self.assertEqual(props['local_gb'], result['local_gb'])
        self.assertEqual(props['local_gb'], result['local_gb_used'])
        self.assertEqual(node_uuid, result['hypervisor_hostname'])
        self.assertEqual(stats, jsonutils.loads(result['stats']))
    @mock.patch.object(ironic_driver.LOG, 'warning')
    def test__parse_node_properties(self, mock_warning):
        props = _get_properties()
        node = ironic_utils.get_test_node(
            uuid=uuidutils.generate_uuid(),
            properties=props)
        # raw_cpu_arch is included because extra_specs filters do not
        # canonicalize the arch
        props['raw_cpu_arch'] = props['cpu_arch']
        parsed = self.driver._parse_node_properties(node)
        self.assertEqual(props, parsed)
        # Assert we didn't log any warning since all properties are
        # correct
        self.assertFalse(mock_warning.called)
    @mock.patch.object(ironic_driver.LOG, 'warning')
    def test__parse_node_properties_bad_values(self, mock_warning):
        props = _get_properties()
        props['cpus'] = 'bad-value'
        props['memory_mb'] = 'bad-value'
        props['local_gb'] = 'bad-value'
        props['cpu_arch'] = 'bad-value'
        node = ironic_utils.get_test_node(
            uuid=uuidutils.generate_uuid(),
            properties=props)
        # raw_cpu_arch is included because extra_specs filters do not
        # canonicalize the arch
        props['raw_cpu_arch'] = props['cpu_arch']
        parsed = self.driver._parse_node_properties(node)
        expected_props = props.copy()
        expected_props['cpus'] = 0
        expected_props['memory_mb'] = 0
        expected_props['local_gb'] = 0
        expected_props['cpu_arch'] = None
        self.assertEqual(expected_props, parsed)
        self.assertEqual(4, mock_warning.call_count)
    @mock.patch.object(ironic_driver.LOG, 'warning')
    def test__parse_node_properties_canonicalize_cpu_arch(self, mock_warning):
        props = _get_properties()
        props['cpu_arch'] = 'amd64'
        node = ironic_utils.get_test_node(
            uuid=uuidutils.generate_uuid(),
            properties=props)
        # raw_cpu_arch is included because extra_specs filters do not
        # canonicalize the arch
        props['raw_cpu_arch'] = props['cpu_arch']
        parsed = self.driver._parse_node_properties(node)
        expected_props = props.copy()
        # Make sure cpu_arch was canonicalized
        expected_props['cpu_arch'] = 'x86_64'
        self.assertEqual(expected_props, parsed)
        # Assert we didn't log any warning since all properties are
        # correct
        self.assertFalse(mock_warning.called)
    @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
                       create=True)
    @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
                       create=True)
    @mock.patch.object(firewall.NoopFirewallDriver, 'apply_instance_filter',
                       create=True)
    def test__start_firewall(self, mock_aif, mock_sbf, mock_pif):
        fake_inst = 'fake-inst'
        fake_net_info = utils.get_test_network_info()
        self.driver._start_firewall(fake_inst, fake_net_info)
        mock_aif.assert_called_once_with(fake_inst, fake_net_info)
        mock_sbf.assert_called_once_with(fake_inst, fake_net_info)
        mock_pif.assert_called_once_with(fake_inst, fake_net_info)
    @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
                       create=True)
    def test__stop_firewall(self, mock_ui):
        fake_inst = 'fake-inst'
        fake_net_info = utils.get_test_network_info()
        self.driver._stop_firewall(fake_inst, fake_net_info)
        mock_ui.assert_called_once_with(fake_inst, fake_net_info)
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    def test_instance_exists(self, mock_call):
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid)
        self.assertTrue(self.driver.instance_exists(instance))
        mock_call.assert_called_once_with('node.get_by_instance_uuid',
                                          self.instance_uuid)
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    def test_instance_exists_fail(self, mock_call):
        mock_call.side_effect = ironic_exception.NotFound
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid)
        self.assertFalse(self.driver.instance_exists(instance))
        mock_call.assert_called_once_with('node.get_by_instance_uuid',
                                          self.instance_uuid)
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    @mock.patch.object(objects.Instance, 'get_by_uuid')
    def test_list_instances(self, mock_inst_by_uuid, mock_call):
        nodes = []
        instances = []
        for i in range(2):
            uuid = uuidutils.generate_uuid()
            instances.append(fake_instance.fake_instance_obj(self.ctx,
                                                             id=i,
                                                             uuid=uuid))
            nodes.append(ironic_utils.get_test_node(instance_uuid=uuid))
        mock_inst_by_uuid.side_effect = instances
        mock_call.return_value = nodes
        response = self.driver.list_instances()
        mock_call.assert_called_with("node.list", associated=True, limit=0)
        expected_calls = [mock.call(mock.ANY, instances[0].uuid),
                          mock.call(mock.ANY, instances[1].uuid)]
        mock_inst_by_uuid.assert_has_calls(expected_calls)
        self.assertEqual(['instance-00000000', 'instance-00000001'],
                          sorted(response))
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    @mock.patch.object(objects.Instance, 'get_by_uuid')
    def test_list_instances_fail(self, mock_inst_by_uuid, mock_call):
        mock_call.side_effect = exception.NovaException
        response = self.driver.list_instances()
        mock_call.assert_called_with("node.list", associated=True, limit=0)
        self.assertFalse(mock_inst_by_uuid.called)
        self.assertThat(response, HasLength(0))
    @mock.patch.object(cw.IronicClientWrapper, 'call')
    def test_list_instance_uuids(self, mock_call):
        num_nodes = 2
        nodes = []
        for n in range(num_nodes):
            nodes.append(ironic_utils.get_test_node(
                                      instance_uuid=uuidutils.generate_uuid()))
        mock_call.return_value = nodes
        uuids = self.driver.list_instance_uuids()
        mock_call.assert_called_with('node.list', associated=True, limit=0)
        expected = [n.instance_uuid for n in nodes]
        self.assertEqual(sorted(expected), sorted(uuids))
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    def test_node_is_available_empty_cache_empty_list(self, mock_get,
                                                      mock_list):
        node = ironic_utils.get_test_node()
        mock_get.return_value = node
        mock_list.return_value = []
        self.assertTrue(self.driver.node_is_available(node.uuid))
        mock_get.assert_called_with(node.uuid)
        mock_list.assert_called_with(detail=True, limit=0)
        mock_get.side_effect = ironic_exception.NotFound
        self.assertFalse(self.driver.node_is_available(node.uuid))
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    def test_node_is_available_empty_cache(self, mock_get, mock_list):
        node = ironic_utils.get_test_node()
        mock_get.return_value = node
        mock_list.return_value = [node]
        self.assertTrue(self.driver.node_is_available(node.uuid))
        mock_list.assert_called_with(detail=True, limit=0)
        self.assertEqual(0, mock_get.call_count)
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    def test_node_is_available_with_cache(self, mock_get, mock_list):
        node = ironic_utils.get_test_node()
        mock_get.return_value = node
        mock_list.return_value = [node]
        # populate the cache
        self.driver.get_available_nodes(refresh=True)
        # prove that zero calls are made after populating cache
        mock_list.reset_mock()
        self.assertTrue(self.driver.node_is_available(node.uuid))
        self.assertEqual(0, mock_list.call_count)
        self.assertEqual(0, mock_get.call_count)
    def test__node_resources_unavailable(self):
        node_dicts = [
            # a node in maintenance /w no instance and power OFF
            {'uuid': uuidutils.generate_uuid(),
             'maintenance': True,
             'power_state': ironic_states.POWER_OFF,
             'provision_state': ironic_states.AVAILABLE},
            # a node in maintenance /w no instance and ERROR power state
            {'uuid': uuidutils.generate_uuid(),
             'maintenance': True,
             'power_state': ironic_states.ERROR,
             'provision_state': ironic_states.AVAILABLE},
            # a node not in maintenance /w no instance and bad power state
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.NOSTATE,
             'provision_state': ironic_states.AVAILABLE},
            # a node not in maintenance, good power state, bad provision state
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.MANAGEABLE},
            # a node in cleaning
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.CLEANING},
            # a node in cleaning, waiting for a clean step to finish
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.CLEANWAIT},
            # a node in deleting
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.DELETING},
            # a node in deleted
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.POWER_ON,
             'provision_state': ironic_states.DELETED}
        ]
        for n in node_dicts:
            node = ironic_utils.get_test_node(**n)
            self.assertTrue(self.driver._node_resources_unavailable(node))
        for ok_state in (ironic_states.AVAILABLE, ironic_states.NOSTATE):
            # these are both ok and should present as available
            avail_node = ironic_utils.get_test_node(
                            power_state=ironic_states.POWER_OFF,
                            provision_state=ok_state)
            unavailable = self.driver._node_resources_unavailable(avail_node)
            self.assertFalse(unavailable)
    def test__node_resources_used(self):
        node_dicts = [
            # a node /w instance and in the ACTIVE provision state
            {'uuid': uuidutils.generate_uuid(),
             'instance_uuid': uuidutils.generate_uuid(),
             'provision_state': ironic_states.ACTIVE},
        ]
        for n in node_dicts:
            node = ironic_utils.get_test_node(**n)
            self.assertTrue(self.driver._node_resources_used(node))
        unused_node = ironic_utils.get_test_node(
            instance_uuid=None,
            provision_state=ironic_states.AVAILABLE)
        self.assertFalse(self.driver._node_resources_used(unused_node))
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    def test_get_available_nodes(self, mock_list):
        node_dicts = [
            # a node in maintenance /w no instance and power OFF
            {'uuid': uuidutils.generate_uuid(),
             'maintenance': True,
             'power_state': ironic_states.POWER_OFF},
            # a node /w instance and power ON
            {'uuid': uuidutils.generate_uuid(),
             'instance_uuid': self.instance_uuid,
             'power_state': ironic_states.POWER_ON},
            # a node not in maintenance /w no instance and bad power state
            {'uuid': uuidutils.generate_uuid(),
             'power_state': ironic_states.ERROR},
        ]
        nodes = [ironic_utils.get_test_node(**n) for n in node_dicts]
        mock_list.return_value = nodes
        available_nodes = self.driver.get_available_nodes()
        expected_uuids = [n['uuid'] for n in node_dicts]
        self.assertEqual(sorted(expected_uuids), sorted(available_nodes))
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
    def test_get_available_resource(self, mock_nr, mock_list, mock_get):
        node = ironic_utils.get_test_node()
        node_2 = ironic_utils.get_test_node(uuid=uuidutils.generate_uuid())
        fake_resource = 'fake-resource'
        mock_get.return_value = node
        # ensure cache gets populated without the node we want
        mock_list.return_value = [node_2]
        mock_nr.return_value = fake_resource
        result = self.driver.get_available_resource(node.uuid)
        self.assertEqual(fake_resource, result)
        mock_nr.assert_called_once_with(node)
        mock_get.assert_called_once_with(node.uuid)
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(FAKE_CLIENT.node, 'list')
    @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
    def test_get_available_resource_with_cache(self, mock_nr, mock_list,
                                               mock_get):
        node = ironic_utils.get_test_node()
        fake_resource = 'fake-resource'
        mock_list.return_value = [node]
        mock_nr.return_value = fake_resource
        # populate the cache
        self.driver.get_available_nodes(refresh=True)
        mock_list.reset_mock()
        result = self.driver.get_available_resource(node.uuid)
        self.assertEqual(fake_resource, result)
        self.assertEqual(0, mock_list.call_count)
        self.assertEqual(0, mock_get.call_count)
        mock_nr.assert_called_once_with(node)
    @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
    def test_get_info(self, mock_gbiu):
        properties = {'memory_mb': 512, 'cpus': 2}
        power_state = ironic_states.POWER_ON
        node = ironic_utils.get_test_node(instance_uuid=self.instance_uuid,
                                          properties=properties,
                                          power_state=power_state)
        mock_gbiu.return_value = node
        # ironic_states.POWER_ON should be mapped to
        # nova_states.RUNNING
        memory_kib = properties['memory_mb'] * 1024
        instance = fake_instance.fake_instance_obj('fake-context',
                                                   uuid=self.instance_uuid)
        result = self.driver.get_info(instance)
        self.assertEqual(hardware.InstanceInfo(state=nova_states.RUNNING,
                                               max_mem_kb=memory_kib,
                                               mem_kb=memory_kib,
                                               num_cpu=properties['cpus']),
                         result)
    @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
    def test_get_info_http_not_found(self, mock_gbiu):
        mock_gbiu.side_effect = ironic_exception.NotFound()
        instance = fake_instance.fake_instance_obj(
                                  self.ctx, uuid=uuidutils.generate_uuid())
        result = self.driver.get_info(instance)
        self.assertEqual(hardware.InstanceInfo(state=nova_states.NOSTATE),
                         result)
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_macs_for_instance(self, mock_node):
        node = ironic_utils.get_test_node()
        port = ironic_utils.get_test_port()
        mock_node.get.return_value = node
        mock_node.list_ports.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        result = self.driver.macs_for_instance(instance)
        self.assertEqual(set([port.address]), result)
        mock_node.list_ports.assert_called_once_with(node.uuid)
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    def test_macs_for_instance_http_not_found(self, mock_get):
        mock_get.side_effect = ironic_exception.NotFound()
        instance = fake_instance.fake_instance_obj(
                                  self.ctx, node=uuidutils.generate_uuid())
        result = self.driver.macs_for_instance(instance)
        self.assertIsNone(result)
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def _test_spawn(self, mock_sf, mock_pvifs, mock_adf, mock_wait_active,
                    mock_node, mock_looping, mock_save):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        fake_flavor = objects.Flavor(ephemeral_gb=0)
        instance.flavor = fake_flavor
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        self.driver.spawn(self.ctx, instance, None, [], None)
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_adf.assert_called_once_with(node, instance, None, fake_flavor)
        mock_pvifs.assert_called_once_with(node, instance, None)
        mock_sf.assert_called_once_with(instance, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                'active', configdrive=mock.ANY)
        self.assertIsNone(instance.default_ephemeral_device)
        self.assertFalse(mock_save.called)
        mock_looping.assert_called_once_with(mock_wait_active,
                                             FAKE_CLIENT_WRAPPER,
                                             instance)
        fake_looping_call.start.assert_called_once_with(
            interval=CONF.ironic.api_retry_interval)
        fake_looping_call.wait.assert_called_once_with()
    @mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive')
    @mock.patch.object(configdrive, 'required_by')
    def test_spawn(self, mock_required_by, mock_configdrive):
        mock_required_by.return_value = False
        self._test_spawn()
        # assert configdrive was not generated
        self.assertFalse(mock_configdrive.called)
    @mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive')
    @mock.patch.object(configdrive, 'required_by')
    def test_spawn_with_configdrive(self, mock_required_by, mock_configdrive):
        mock_required_by.return_value = True
        self._test_spawn()
        # assert configdrive was generated
        mock_configdrive.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
                                                 extra_md={}, files=[])
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def test_spawn_destroyed_after_failure(self, mock_sf, mock_pvifs, mock_adf,
                                           mock_wait_active, mock_destroy,
                                           mock_node, mock_looping,
                                           mock_required_by):
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        fake_flavor = objects.Flavor(ephemeral_gb=0)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = fake_flavor
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        deploy_exc = exception.InstanceDeployFailure('foo')
        fake_looping_call.wait.side_effect = deploy_exc
        self.assertRaises(
            exception.InstanceDeployFailure,
            self.driver.spawn, self.ctx, instance, None, [], None)
        mock_destroy.assert_called_once_with(self.ctx, instance, None)
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__add_driver_fields_good(self, mock_update):
        node = ironic_utils.get_test_node(driver='fake')
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        image_meta = ironic_utils.get_test_image_meta()
        flavor = ironic_utils.get_test_flavor()
        self.driver._add_driver_fields(node, instance, image_meta, flavor)
        expected_patch = [{'path': '/instance_info/image_source', 'op': 'add',
                           'value': image_meta['id']},
                          {'path': '/instance_info/root_gb', 'op': 'add',
                           'value': str(instance.root_gb)},
                          {'path': '/instance_info/swap_mb', 'op': 'add',
                           'value': str(flavor['swap'])},
                          {'path': '/instance_info/display_name',
                           'value': instance.display_name, 'op': 'add'},
                          {'path': '/instance_uuid', 'op': 'add',
                           'value': instance.uuid}]
        mock_update.assert_called_once_with(node.uuid, expected_patch)
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__add_driver_fields_fail(self, mock_update):
        mock_update.side_effect = ironic_exception.BadRequest()
        node = ironic_utils.get_test_node(driver='fake')
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        image_meta = ironic_utils.get_test_image_meta()
        flavor = ironic_utils.get_test_flavor()
        self.assertRaises(exception.InstanceDeployFailure,
                          self.driver._add_driver_fields,
                          node, instance, image_meta, flavor)
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__cleanup_deploy_good_with_flavor(self, mock_update):
        node = ironic_utils.get_test_node(driver='fake',
                                          instance_uuid=self.instance_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        flavor = ironic_utils.get_test_flavor(extra_specs={})
        self.driver._cleanup_deploy(self.ctx, node, instance, None,
                                    flavor=flavor)
        expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
        mock_update.assert_called_once_with(node.uuid, expected_patch)
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__cleanup_deploy_instance_already_removed(self, mock_update,
                                                      mock_validate):
        mock_validate.side_effect = exception.InstanceNotFound(
            instance_id='fake-instance')
        node = ironic_utils.get_test_node(driver='fake',
                                          instance_uuid=self.instance_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        flavor = ironic_utils.get_test_flavor(extra_specs={})
        self.driver._cleanup_deploy(self.ctx, node, instance, None,
                                    flavor=flavor)
        # assert node.update is not called
        self.assertFalse(mock_update.called)
        mock_validate.assert_called_once_with(mock.ANY, instance)
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__cleanup_deploy_without_flavor(self, mock_update):
        node = ironic_utils.get_test_node(driver='fake',
                                          instance_uuid=self.instance_uuid)
        flavor = ironic_utils.get_test_flavor(extra_specs={})
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        instance.flavor = flavor
        self.driver._cleanup_deploy(self.ctx, node, instance, None)
        expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
        mock_update.assert_called_once_with(node.uuid, expected_patch)
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__cleanup_deploy_fail(self, mock_update):
        mock_update.side_effect = ironic_exception.BadRequest()
        node = ironic_utils.get_test_node(driver='fake',
                                          instance_uuid=self.instance_uuid)
        flavor = ironic_utils.get_test_flavor(extra_specs={})
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        instance.flavor = flavor
        self.assertRaises(exception.InstanceTerminationFailure,
                          self.driver._cleanup_deploy,
                          self.ctx, node, instance, None)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_spawn_node_driver_validation_fail(self, mock_node,
                                               mock_required_by):
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        mock_node.validate.return_value = ironic_utils.get_test_validation(
            power=False, deploy=False)
        mock_node.get.return_value = node
        image_meta = ironic_utils.get_test_image_meta()
        self.assertRaises(exception.ValidationError, self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_spawn_node_prepare_for_deploy_fail(self, mock_cleanup_deploy,
                                                mock_pvifs, mock_sf,
                                                mock_node, mock_required_by):
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        image_meta = ironic_utils.get_test_image_meta()
        class TestException(Exception):
            pass
        mock_sf.side_effect = TestException()
        self.assertRaises(TestException, self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_cleanup_deploy.assert_called_with(self.ctx, node, instance, None,
                                               flavor=flavor)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_spawn_node_trigger_deploy_fail(self, mock_cleanup_deploy,
                                            mock_pvifs, mock_sf,
                                            mock_node, mock_required_by):
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        image_meta = ironic_utils.get_test_image_meta()
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.set_provision_state.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
                                                    instance, None,
                                                    flavor=flavor)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_spawn_node_trigger_deploy_fail2(self, mock_cleanup_deploy,
                                             mock_pvifs, mock_sf,
                                             mock_node, mock_required_by):
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        image_meta = ironic_utils.get_test_image_meta()
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.set_provision_state.side_effect = ironic_exception.BadRequest
        self.assertRaises(ironic_exception.BadRequest,
                          self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
                                                    instance, None,
                                                    flavor=flavor)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
    def test_spawn_node_trigger_deploy_fail3(self, mock_destroy,
                                             mock_pvifs, mock_sf,
                                             mock_node, mock_looping,
                                             mock_required_by):
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        image_meta = ironic_utils.get_test_image_meta()
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        fake_looping_call.wait.side_effect = ironic_exception.BadRequest
        fake_net_info = utils.get_test_network_info()
        self.assertRaises(ironic_exception.BadRequest,
                          self.driver.spawn, self.ctx, instance,
                          image_meta, [], None, fake_net_info)
        mock_destroy.assert_called_once_with(self.ctx, instance,
                                             fake_net_info)
    @mock.patch.object(configdrive, 'required_by')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def test_spawn_sets_default_ephemeral_device(self, mock_sf, mock_pvifs,
                                                 mock_wait, mock_node,
                                                 mock_save, mock_looping,
                                                 mock_required_by):
        mock_required_by.return_value = False
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        flavor = ironic_utils.get_test_flavor(ephemeral_gb=1)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        instance.flavor = flavor
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        image_meta = ironic_utils.get_test_image_meta()
        self.driver.spawn(self.ctx, instance, image_meta, [], None)
        self.assertTrue(mock_save.called)
        self.assertEqual('/dev/sda1', instance.default_ephemeral_device)
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_destroy(self, mock_cleanup_deploy, mock_node):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        network_info = 'foo'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        def fake_set_provision_state(*_):
            node.provision_state = None
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.side_effect = fake_set_provision_state
        self.driver.destroy(self.ctx, instance, network_info, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                              'deleted')
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
        mock_cleanup_deploy.assert_called_with(self.ctx, node,
                                               instance, network_info)
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_destroy_ignore_unexpected_state(self, mock_cleanup_deploy,
                                             mock_node):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        network_info = 'foo'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.DELETING)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        mock_node.get_by_instance_uuid.return_value = node
        self.driver.destroy(self.ctx, instance, network_info, None)
        self.assertFalse(mock_node.set_provision_state.called)
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
        mock_cleanup_deploy.assert_called_with(self.ctx, node, instance,
                                               network_info)
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def _test_destroy_cleaning(self, mock_cleanup_deploy, mock_node,
                               state=None):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        network_info = 'foo'
        node = ironic_utils.get_test_node(
            driver='fake', uuid=node_uuid,
            provision_state=state)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        mock_node.get_by_instance_uuid.return_value = node
        self.driver.destroy(self.ctx, instance, network_info, None)
        self.assertFalse(mock_node.set_provision_state.called)
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
        mock_cleanup_deploy.assert_called_with(self.ctx, node, instance,
                                               network_info)
    def test_destroy_cleaning(self):
        self._test_destroy_cleaning(state=ironic_states.CLEANING)
    def test_destroy_cleanwait(self):
        self._test_destroy_cleaning(state=ironic_states.CLEANWAIT)
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test_destroy_trigger_undeploy_fail(self, fake_validate, mock_sps):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        fake_validate.return_value = node
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        mock_sps.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.destroy,
                          self.ctx, instance, None, None)
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def _test__unprovision_instance(self, mock_validate_inst, state=None):
        fake_ironic_client = mock.Mock()
        node = ironic_utils.get_test_node(
            driver='fake',
            provision_state=state)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
        mock_validate_inst.return_value = node
        self.driver._unprovision(fake_ironic_client, instance, node)
        mock_validate_inst.assert_called_once_with(fake_ironic_client,
                                                   instance)
        fake_ironic_client.call.assert_called_once_with(
            "node.set_provision_state", node.uuid, "deleted")
    def test__unprovision_cleaning(self):
        self._test__unprovision_instance(state=ironic_states.CLEANING)
    def test__unprovision_cleanwait(self):
        self._test__unprovision_instance(state=ironic_states.CLEANWAIT)
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test__unprovision_fail_max_retries(self, mock_validate_inst):
        CONF.set_default('api_max_retries', default=2, group='ironic')
        fake_ironic_client = mock.Mock()
        node = ironic_utils.get_test_node(
            driver='fake',
            provision_state=ironic_states.ACTIVE)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
        mock_validate_inst.return_value = node
        self.assertRaises(exception.NovaException, self.driver._unprovision,
                          fake_ironic_client, instance, node)
        expected_calls = (mock.call(mock.ANY, instance),
                          mock.call(mock.ANY, instance))
        mock_validate_inst.assert_has_calls(expected_calls)
        fake_ironic_client.call.assert_called_once_with(
            "node.set_provision_state", node.uuid, "deleted")
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    def test__unprovision_instance_not_found(self, mock_validate_inst):
        fake_ironic_client = mock.Mock()
        node = ironic_utils.get_test_node(
            driver='fake', provision_state=ironic_states.DELETING)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
        mock_validate_inst.side_effect = exception.InstanceNotFound(
            instance_id='fake')
        self.driver._unprovision(fake_ironic_client, instance, node)
        mock_validate_inst.assert_called_once_with(fake_ironic_client,
                                                   instance)
        fake_ironic_client.call.assert_called_once_with(
            "node.set_provision_state", node.uuid, "deleted")
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_destroy_unassociate_fail(self, mock_node):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.update.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.destroy,
                          self.ctx, instance, None, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                              'deleted')
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
    def test_reboot(self, mock_sp, fake_validate, mock_looping):
        node = ironic_utils.get_test_node()
        fake_validate.side_effect = [node, node]
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        self.driver.reboot(self.ctx, instance, None, None)
        mock_sp.assert_called_once_with(node.uuid, 'reboot')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
    def test_power_off(self, mock_sp, fake_validate, mock_looping):
        self._test_power_on_off(mock_sp, fake_validate, mock_looping,
                                method_name='power_off')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(ironic_driver, '_validate_instance_and_node')
    @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
    def test_power_on(self, mock_sp, fake_validate, mock_looping):
        self._test_power_on_off(mock_sp, fake_validate, mock_looping,
                                method_name='power_on')
    def _test_power_on_off(self, mock_sp, fake_validate, mock_looping,
                           method_name=None):
        node = ironic_utils.get_test_node()
        fake_validate.side_effect = [node, node]
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=self.instance_uuid)
        # Call the method under test here
        if method_name == 'power_on':
            self.driver.power_on(self.ctx, instance,
                                 utils.get_test_network_info())
            mock_sp.assert_called_once_with(node.uuid, 'on')
        elif method_name == 'power_off':
            self.driver.power_off(instance)
            mock_sp.assert_called_once_with(node.uuid, 'off')
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_with_port(self, mock_uvifs, mock_port_udt, mock_lp):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port()
        mock_lp.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        network_info = utils.get_test_network_info()
        port_id = six.text_type(network_info[0]['id'])
        expected_patch = [{'op': 'add',
                           'path': '/extra/vif_port_id',
                           'value': port_id}]
        self.driver._plug_vifs(node, instance, network_info)
        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        mock_port_udt.assert_called_with(port.uuid, expected_patch)
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    def test_plug_vifs(self, mock__plug_vifs, mock_get):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        mock_get.return_value = node
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        network_info = utils.get_test_network_info()
        self.driver.plug_vifs(instance, network_info)
        mock_get.assert_called_once_with(node_uuid)
        mock__plug_vifs.assert_called_once_with(node, instance, network_info)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_count_mismatch(self, mock_uvifs, mock_lp,
                                      mock_port_udt):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port()
        mock_lp.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        # len(network_info) > len(ports)
        network_info = (utils.get_test_network_info() +
                        utils.get_test_network_info())
        self.assertRaises(exception.NovaException,
                          self.driver._plug_vifs, node, instance,
                          network_info)
        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        # assert port.update() was not called
        self.assertFalse(mock_port_udt.called)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_no_network_info(self, mock_uvifs, mock_lp,
                                       mock_port_udt):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port()
        mock_lp.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        network_info = []
        self.driver._plug_vifs(node, instance, network_info)
        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        # assert port.update() was not called
        self.assertFalse(mock_port_udt.called)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_unplug_vifs(self, mock_node, mock_update):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port(extra={'vif_port_id': 'fake-vif'})
        mock_node.get.return_value = node
        mock_node.list_ports.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        expected_patch = [{'op': 'remove', 'path':
                           '/extra/vif_port_id'}]
        self.driver.unplug_vifs(instance,
                                utils.get_test_network_info())
        # asserts
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
        mock_update.assert_called_once_with(port.uuid, expected_patch)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_unplug_vifs_port_not_associated(self, mock_node, mock_update):
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port(extra={})
        mock_node.get.return_value = node
        mock_node.list_ports.return_value = [port]
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        self.driver.unplug_vifs(instance, utils.get_test_network_info())
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
        # assert port.update() was not called
        self.assertFalse(mock_update.called)
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    def test_unplug_vifs_no_network_info(self, mock_update):
        instance = fake_instance.fake_instance_obj(self.ctx)
        network_info = []
        self.driver.unplug_vifs(instance, network_info)
        # assert port.update() was not called
        self.assertFalse(mock_update.called)
    @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
                       create=True)
    def test_unfilter_instance(self, mock_ui):
        instance = fake_instance.fake_instance_obj(self.ctx)
        network_info = utils.get_test_network_info()
        self.driver.unfilter_instance(instance, network_info)
        mock_ui.assert_called_once_with(instance, network_info)
    @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
                       create=True)
    @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
                       create=True)
    def test_ensure_filtering_rules_for_instance(self, mock_pif, mock_sbf):
        instance = fake_instance.fake_instance_obj(self.ctx)
        network_info = utils.get_test_network_info()
        self.driver.ensure_filtering_rules_for_instance(instance,
                                                        network_info)
        mock_sbf.assert_called_once_with(instance, network_info)
        mock_pif.assert_called_once_with(instance, network_info)
    @mock.patch.object(firewall.NoopFirewallDriver,
                       'refresh_instance_security_rules', create=True)
    def test_refresh_instance_security_rules(self, mock_risr):
        instance = fake_instance.fake_instance_obj(self.ctx)
        self.driver.refresh_instance_security_rules(instance)
        mock_risr.assert_called_once_with(instance)
    @mock.patch.object(firewall.NoopFirewallDriver,
                       'refresh_provider_fw_rules', create=True)
    def test_refresh_provider_fw_rules(self, mock_rpfr):
        fake_instance.fake_instance_obj(self.ctx)
        self.driver.refresh_provider_fw_rules()
        mock_rpfr.assert_called_once_with()
    @mock.patch.object(firewall.NoopFirewallDriver,
                       'refresh_security_group_members', create=True)
    def test_refresh_security_group_members(self, mock_rsgm):
        fake_group = 'fake-security-group-members'
        self.driver.refresh_security_group_members(fake_group)
        mock_rsgm.assert_called_once_with(fake_group)
    @mock.patch.object(firewall.NoopFirewallDriver,
                      'refresh_instance_security_rules', create=True)
    def test_refresh_security_group_rules(self, mock_risr):
        fake_group = 'fake-security-group-members'
        self.driver.refresh_instance_security_rules(fake_group)
        mock_risr.assert_called_once_with(fake_group)
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(objects.Instance, 'save')
    def _test_rebuild(self, mock_save, mock_get, mock_driver_fields,
                      mock_set_pstate, mock_looping, mock_wait_active,
                      preserve=False):
        node_uuid = uuidutils.generate_uuid()
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=self.instance_uuid,
                                          instance_type_id=5)
        mock_get.return_value = node
        image_meta = ironic_utils.get_test_image_meta()
        flavor_id = 5
        flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid,
                                                   node=node_uuid,
                                                   instance_type_id=flavor_id)
        instance.flavor = flavor
        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call
        self.driver.rebuild(
            context=self.ctx, instance=instance, image_meta=image_meta,
            injected_files=None, admin_password=None, bdms=None,
            detach_block_devices=None, attach_block_devices=None,
            preserve_ephemeral=preserve)
        mock_save.assert_called_once_with(
            expected_task_state=[task_states.REBUILDING])
        mock_driver_fields.assert_called_once_with(node, instance, image_meta,
                                                   flavor, preserve)
        mock_set_pstate.assert_called_once_with(node_uuid,
                                                ironic_states.REBUILD)
        mock_looping.assert_called_once_with(mock_wait_active,
                                             FAKE_CLIENT_WRAPPER,
                                             instance)
        fake_looping_call.start.assert_called_once_with(
            interval=CONF.ironic.api_retry_interval)
        fake_looping_call.wait.assert_called_once_with()
    def test_rebuild_preserve_ephemeral(self):
        self._test_rebuild(preserve=True)
    def test_rebuild_no_preserve_ephemeral(self):
        self._test_rebuild(preserve=False)
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(objects.Instance, 'save')
    def test_rebuild_failures(self, mock_save, mock_get, mock_driver_fields,
                              mock_set_pstate):
        node_uuid = uuidutils.generate_uuid()
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=self.instance_uuid,
                                          instance_type_id=5)
        mock_get.return_value = node
        image_meta = ironic_utils.get_test_image_meta()
        flavor_id = 5
        flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=self.instance_uuid,
                                                   node=node_uuid,
                                                   instance_type_id=flavor_id)
        instance.flavor = flavor
        exceptions = [
            exception.NovaException(),
            ironic_exception.BadRequest(),
            ironic_exception.InternalServerError(),
        ]
        for e in exceptions:
            mock_set_pstate.side_effect = e
            self.assertRaises(exception.InstanceDeployFailure,
                self.driver.rebuild,
                context=self.ctx, instance=instance, image_meta=image_meta,
                injected_files=None, admin_password=None, bdms=None,
                detach_block_devices=None, attach_block_devices=None)
@mock.patch.object(instance_metadata, 'InstanceMetadata')
@mock.patch.object(configdrive, 'ConfigDriveBuilder')
class IronicDriverGenerateConfigDriveTestCase(test.NoDBTestCase):
    @mock.patch.object(cw, 'IronicClientWrapper',
                       lambda *_: FAKE_CLIENT_WRAPPER)
    def setUp(self):
        super(IronicDriverGenerateConfigDriveTestCase, self).setUp()
        self.flags(**IRONIC_FLAGS)
        self.driver = ironic_driver.IronicDriver(None)
        self.driver.virtapi = fake.FakeVirtAPI()
        self.ctx = nova_context.get_admin_context()
        node_uuid = uuidutils.generate_uuid()
        self.node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        self.instance = fake_instance.fake_instance_obj(self.ctx,
                                                        node=node_uuid)
        self.network_info = utils.get_test_network_info()
    def test_generate_configdrive(self, mock_cd_builder, mock_instance_meta):
        mock_instance_meta.return_value = 'fake-instance'
        mock_make_drive = mock.MagicMock(make_drive=lambda *_: None)
        mock_cd_builder.return_value.__enter__.return_value = mock_make_drive
        self.driver._generate_configdrive(self.instance, self.node,
                                          self.network_info)
        mock_cd_builder.assert_called_once_with(instance_md='fake-instance')
        mock_instance_meta.assert_called_once_with(self.instance,
            network_info=self.network_info, extra_md={}, content=None)
    def test_generate_configdrive_fail(self, mock_cd_builder,
                                       mock_instance_meta):
        mock_cd_builder.side_effect = exception.ConfigDriveMountFailed(
            operation='foo', error='error')
        mock_instance_meta.return_value = 'fake-instance'
        mock_make_drive = mock.MagicMock(make_drive=lambda *_: None)
        mock_cd_builder.return_value.__enter__.return_value = mock_make_drive
        self.assertRaises(exception.ConfigDriveMountFailed,
                          self.driver._generate_configdrive,
                          self.instance, self.node, self.network_info)
        mock_cd_builder.assert_called_once_with(instance_md='fake-instance')
        mock_instance_meta.assert_called_once_with(self.instance,
            network_info=self.network_info, extra_md={}, content=None)
 | |
| 
	# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Utility functions for NDArray and BaseSparseNDArray."""
import ctypes
from ..base import _LIB, check_call, py_str, c_str, string_types, mx_uint, NDArrayHandle
from ..base import c_array, c_handle_array, c_str_array
from .ndarray import NDArray
from .ndarray import array as _array
from .ndarray import empty as _empty_ndarray
from .ndarray import zeros as _zeros_ndarray
from .sparse import zeros as _zeros_sparse_ndarray
from .sparse import empty as _empty_sparse_ndarray
from .sparse import array as _sparse_array
from .sparse import _ndarray_cls
try:
    import scipy.sparse as spsp
except ImportError:
    spsp = None
__all__ = ['zeros', 'empty', 'array', 'load', 'load_frombuffer', 'save']
def zeros(shape, ctx=None, dtype=None, stype=None, **kwargs):
    """Return a new array of given shape and type, filled with zeros.
    Parameters
    ----------
    shape : int or tuple of int
        The shape of the array.
    ctx : Context, optional
        An optional device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).
    stype : str, optional
        The storage type of the array, such as 'row_sparse', 'csr', etc.
    Returns
    -------
    NDArray, CSRNDArray or RowSparseNDArray
        A created array
    Examples
    --------
    >>> mx.nd.zeros((1,2), mx.cpu(), stype='csr')
    <CSRNDArray 1x2 @cpu(0)>
    >>> mx.nd.zeros((1,2), mx.cpu(), 'float16', stype='row_sparse').asnumpy()
    array([[ 0.,  0.]], dtype=float16)
    """
    if stype is None or stype == 'default':
        return _zeros_ndarray(shape, ctx, dtype, **kwargs)
    else:
        return _zeros_sparse_ndarray(stype, shape, ctx, dtype, **kwargs)
def empty(shape, ctx=None, dtype=None, stype=None):
    """Returns a new array of given shape and type, without initializing entries.
    Parameters
    ----------
    shape : int or tuple of int
        The shape of the empty array.
    ctx : Context, optional
        An optional device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).
    stype : str, optional
        An optional storage type (default is `default`).
    Returns
    -------
    NDArray, CSRNDArray or RowSparseNDArray
        A created array.
    Examples
    --------
    >>> mx.nd.empty(1)
    <NDArray 1 @cpu(0)>
    >>> mx.nd.empty((1,2), mx.gpu(0))
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.empty((1,2), mx.gpu(0), 'float16')
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.empty((1,2), stype='csr')
    <CSRNDArray 1x2 @cpu(0)>
    """
    if stype is None or stype == 'default':
        return _empty_ndarray(shape, ctx, dtype)
    else:
        return _empty_sparse_ndarray(stype, shape, ctx, dtype)
def array(source_array, ctx=None, dtype=None):
    """Creates an array from any object exposing the array interface.
    Parameters
    ----------
    source_array : array_like
        An object exposing the array interface, an object whose `__array__`
        method returns an array, or any (nested) sequence.
    ctx : Context, optional
        Device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        The data type of the output array. The default dtype is ``source_array.dtype``
        if `source_array` is an `NDArray`, `float32` otherwise.
    Returns
    -------
    NDArray, RowSparseNDArray or CSRNDArray
        An array with the same contents as the `source_array`.
    Examples
    --------
    >>> import numpy as np
    >>> mx.nd.array([1, 2, 3])
    <NDArray 3 @cpu(0)>
    >>> mx.nd.array([[1, 2], [3, 4]])
    <NDArray 2x2 @cpu(0)>
    >>> mx.nd.array(np.zeros((3, 2)))
    <NDArray 3x2 @cpu(0)>
    >>> mx.nd.array(np.zeros((3, 2)), mx.gpu(0))
    <NDArray 3x2 @gpu(0)>
    >>> mx.nd.array(mx.nd.zeros((3, 2), stype='row_sparse'))
    <RowSparseNDArray 3x2 @cpu(0)>
    """
    if spsp is not None and isinstance(source_array, spsp.csr.csr_matrix):
        return _sparse_array(source_array, ctx=ctx, dtype=dtype)
    elif isinstance(source_array, NDArray) and source_array.stype != 'default':
        return _sparse_array(source_array, ctx=ctx, dtype=dtype)
    else:
        return _array(source_array, ctx=ctx, dtype=dtype)
def load(fname):
    """Loads an array from file.
    See more details in ``save``.
    Parameters
    ----------
    fname : str
        The filename.
    Returns
    -------
    list of NDArray, RowSparseNDArray or CSRNDArray, or \
    dict of str to NDArray, RowSparseNDArray or CSRNDArray
        Loaded data.
    """
    if not isinstance(fname, string_types):
        raise TypeError('fname required to be a string')
    out_size = mx_uint()
    out_name_size = mx_uint()
    handles = ctypes.POINTER(NDArrayHandle)()
    names = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXNDArrayLoad(c_str(fname),
                                  ctypes.byref(out_size),
                                  ctypes.byref(handles),
                                  ctypes.byref(out_name_size),
                                  ctypes.byref(names)))
    if out_name_size.value == 0:
        return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
    else:
        assert out_name_size.value == out_size.value
        return dict(
            (py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
            for i in range(out_size.value))
def load_frombuffer(buf):
    """Loads an array dictionary or list from a buffer
    See more details in ``save``.
    Parameters
    ----------
    buf : str
        Buffer containing contents of a file as a string or bytes.
    Returns
    -------
    list of NDArray, RowSparseNDArray or CSRNDArray, or \
    dict of str to NDArray, RowSparseNDArray or CSRNDArray
        Loaded data.
    """
    if not isinstance(buf, string_types + tuple([bytes])):
        raise TypeError('buf required to be a string or bytes')
    out_size = mx_uint()
    out_name_size = mx_uint()
    handles = ctypes.POINTER(NDArrayHandle)()
    names = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXNDArrayLoadFromBuffer(buf,
                                            mx_uint(len(buf)),
                                            ctypes.byref(out_size),
                                            ctypes.byref(handles),
                                            ctypes.byref(out_name_size),
                                            ctypes.byref(names)))
    if out_name_size.value == 0:
        return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
    else:
        assert out_name_size.value == out_size.value
        return dict(
            (py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
            for i in range(out_size.value))
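# A minimal usage sketch (an illustration added here, not part of the original
# module): ``load_frombuffer`` accepts the raw bytes of a file produced by
# ``save``, so reading such a file in binary mode and passing its contents
# should give the same result as ``load``:
#
#   with open('my_list', 'rb') as f:
#       arrays = load_frombuffer(f.read())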
def save(fname, data):
    """Saves a list of arrays or a dict of str->array to file.
    Examples of filenames:
    - ``/path/to/file``
    - ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 support)
    - ``hdfs://path/to/file`` (if compiled with HDFS support)
    Parameters
    ----------
    fname : str
        The filename.
    data : NDArray, RowSparseNDArray or CSRNDArray, \
           or list of NDArray, RowSparseNDArray or CSRNDArray, \
           or dict of str to NDArray, RowSparseNDArray or CSRNDArray
        The data to save.
    Examples
    --------
    >>> x = mx.nd.zeros((2,3))
    >>> y = mx.nd.ones((1,4))
    >>> mx.nd.save('my_list', [x,y])
    >>> mx.nd.save('my_dict', {'x':x, 'y':y})
    >>> mx.nd.load('my_list')
    [<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
    >>> mx.nd.load('my_dict')
    {'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
    """
    if isinstance(data, NDArray):
        data = [data]
        handles = c_array(NDArrayHandle, [])
    if isinstance(data, dict):
        str_keys = data.keys()
        nd_vals = data.values()
        if any(not isinstance(k, string_types) for k in str_keys) or \
           any(not isinstance(v, NDArray) for v in nd_vals):
            raise TypeError('save only accept dict str->NDArray or list of NDArray')
        keys = c_str_array(str_keys)
        handles = c_handle_array(nd_vals)
    elif isinstance(data, list):
        if any(not isinstance(v, NDArray) for v in data):
            raise TypeError('save only accept dict str->NDArray or list of NDArray')
        keys = None
        handles = c_handle_array(data)
    else:
        raise ValueError("data needs to either be a NDArray, dict of str, NDArray pairs "
                         "or a list of NDarrays.")
    check_call(_LIB.MXNDArraySave(c_str(fname),
                                  mx_uint(len(handles)),
                                  handles,
                                  keys))
 | |
| 
	# -*- coding: utf-8 -*-
from irc3 import plugin
from irc3 import utils
from irc3 import rfc
from irc3.dec import event
from irc3.utils import IrcString
from collections import defaultdict
__doc__ = '''
==============================================
:mod:`irc3.plugins.userlist` User list plugin
==============================================
This plugin maintains a known user list and a channel list.
..
    >>> from irc3.testing import IrcBot
Usage::
    >>> bot = IrcBot()
    >>> bot.include('irc3.plugins.userlist')
    >>> bot.test(':gawel!user@host JOIN #chan')
    >>> print(list(bot.channels['#chan'])[0])
    gawel
    >>> print(list(bot.nicks.keys())[0])
    gawel
    >>> bot.test(':gawel!user@host MODE #chan +o gawel')
    >>> print(list(bot.channels['#chan'].modes['@'])[0])
    gawel
Api
===
.. autoclass:: Channel
'''
class Channel(set):
    """A set like object which contains nicknames that are on the channel and
    user modes:
    .. code-block:: python
        >>> channel = Channel()
        >>> channel.add('gawel', modes='@')
        >>> 'gawel' in channel
        True
        >>> 'gawel' in channel.modes['@']
        True
        >>> channel.remove('gawel')
        >>> 'gawel' in channel
        False
        >>> 'gawel' in channel.modes['@']
        False
    """
    def __init__(self):
        set.__init__(self)
        self.modes = defaultdict(set)
        self.topic = None
    def add(self, item, modes=''):
        set.add(self, item)
        for mode in modes:
            self.modes[mode].add(item)
    def remove(self, item):
        set.remove(self, item)
        for items in self.modes.values():
            if item in items:
                items.remove(item)
    def __repr__(self):
        return repr(sorted(self))
@plugin
class Userlist:
    def __init__(self, context):
        self.context = context
        self.connection_lost()
    def connection_lost(self, client=None):
        self.channels = defaultdict(Channel)
        self.context.channels = self.channels
        self.nicks = {}
        self.context.nicks = self.nicks
    def broadcast(self, *args, **kwargs):
        # only useful for servers
        pass
    @event(rfc.JOIN_PART_QUIT)
    def on_join_part_quit(self, mask=None, event=None, **kwargs):
        getattr(self, event.lower())(mask.nick, mask, **kwargs)
    @event(rfc.KICK)
    def on_kick(self, mask=None, event=None, target=None, **kwargs):
        self.part(target.nick, mask=None, **kwargs)
    def join(self, nick, mask, client=None, **kwargs):
        channel = self.channels[kwargs['channel']]
        if nick != self.context.nick:
            channel.add(mask.nick)
            self.nicks[mask.nick] = client or mask
            if client:
                self.broadcast(client=client, clients=channel, **kwargs)
    def part(self, nick, mask=None, channel=None, client=None, **kwargs):
        if nick == self.context.nick:
            del self.channels[channel]
        else:
            channel = self.channels[channel]
            self.broadcast(client=client, clients=channel, **kwargs)
            channel.remove(nick)
            if client is None and all(
                    nick not in c for c in self.channels.values()):
                del self.nicks[nick]
    def quit(self, nick, mask, channel=None, client=None, **kwargs):
        if nick == self.context.nick:
            self.connection_lost()
        else:
            clients = set()
            for channel in self.channels.values():
                if nick in channel:
                    clients.update(channel)
                    channel.remove(nick)
            self.broadcast(client=client, clients=clients, **kwargs)
            del self.nicks[nick]
    @event(rfc.NEW_NICK)
    def new_nick(self, nick=None, new_nick=None, client=None, **kwargs):
        """update list on new nick"""
        if client is None:
            self.nicks[new_nick] = new_nick + '!' + nick.host
            nick = nick.nick
        clients = {new_nick}
        for channel in self.channels.values():
            if nick in channel:
                for nicknames in channel.modes.values():
                    if nick in nicknames:
                        nicknames.add(new_nick)
                channel.remove(nick)
                clients.update(channel)
                channel.add(new_nick)
        del self.nicks[nick]
        self.broadcast(client=client, clients=clients, **kwargs)
    @event(rfc.RPL_NAMREPLY)
    def names(self, channel=None, data=None, **kwargs):
        """Initialise channel list and channel.modes"""
        statusmsg = self.context.server_config['STATUSMSG']
        nicknames = data.split(' ')
        channel = self.channels[channel]
        for item in nicknames:
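            # e.g. with a STATUSMSG value of '@+', an item like '@gawel' yields
            # nick 'gawel' and modes '@' (illustrative ISUPPORT values)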
            nick = item.strip(statusmsg)
            channel.add(nick, modes=item[:-len(nick)])
            self.nicks[nick] = nick
    @event(rfc.RPL_WHOREPLY)
    def who(self, channel=None, nick=None, username=None, server=None, **kw):
        """Set nick mask"""
        self.channels[channel].add(nick)
        mask = IrcString(nick + '!' + username + '@' + server)
        self.nicks[nick] = mask
    @event(rfc.MODE)
    def mode(self, target=None, modes=None, data=None, client=None, **kw):
        """Add nicknames to channel.modes"""
        if target[0] not in self.context.server_config['CHANTYPES'] \
           or not data:
            # not a channel or no user target
            return
        noargs = self.context.server_config['CHANMODES'].split(',')[-1]
        if not isinstance(data, list):
            data = [d for d in data.split(' ') if d]
        if not modes.startswith(('+', '-')):
            modes = '+' + modes
        modes = utils.parse_modes(modes, data, noargs)
        prefix = self.context.server_config['PREFIX']
        prefix = dict(zip(*prefix.strip('(').split(')')))
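        # e.g. a PREFIX value of '(ov)@+' becomes {'o': '@', 'v': '+'}, mapping
        # channel mode letters to their status prefixes (illustrative ISUPPORT value)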
        channel = self.channels[target]
        for char, mode, tgt in modes:
            if mode in prefix:
                nicknames = channel.modes[prefix[mode]]
                if char == '+':
                    nicknames.add(tgt)
                elif tgt in nicknames:
                    nicknames.remove(tgt)
                if client is not None:
                    broadcast = (
                        ':{mask} MODE {target} {char}{mode} {tgt}').format(
                        char=char, mode=mode, target=target, tgt=tgt,
                        **client.data)
                    self.broadcast(client=client, broadcast=broadcast,
                                   clients=channel)
    @event(rfc.RPL_TOPIC)
    def topic(self, channel=None, data=None, client=None, **kwargs):
        self.channels[channel].topic = data
 | |
| 
	# Modified by CNSL
# 1) including TDNN based char embedding
# 06/02/17
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
from . import layers
from .tdnn import TDNN
from .highway import Highway
import torch.nn.functional as F
import pdb
class RnnDocReader(nn.Module):
    """Network for the Document Reader module of DrQA."""
    RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN}
    def __init__(self, opt, padding_idx=0, padding_idx_char=0):
        super(RnnDocReader, self).__init__()
        # Store config
        self.opt = opt
        #Cudnn
        #if not opt['use_cudnn']:
        #    torch.backends.cudnn.enabled=False
        # Word embeddings (+1 for padding), usually initialized by GloVe
        self.embedding = nn.Embedding(opt['vocab_size'],
                                      opt['embedding_dim'],
                                      padding_idx=padding_idx)
        # Char embeddings (+1 for padding)
        #pdb.set_trace()
        if opt['add_char2word']:
            self.char_embedding = nn.Embedding(opt['vocab_size_char'],
                                               opt['embedding_dim_char'],
                                               padding_idx=padding_idx_char)
            self.char_embedding.weight = nn.Parameter(torch.Tensor(opt['vocab_size_char'],opt['embedding_dim_char']).uniform_(-1,1))
            self.TDNN = TDNN(opt)
            if opt['nLayer_Highway'] > 0 :
                self.Highway = Highway(opt['embedding_dim'] + opt['embedding_dim_TDNN'], opt['nLayer_Highway'], F.relu)
        # ...(maybe) keep them fixed  (word only)
        if opt['fix_embeddings']:
            for p in self.embedding.parameters():
                p.requires_grad = False
        # Register a buffer to (maybe) fill later for keeping *some* fixed
        if opt['tune_partial'] > 0:
            buffer_size = torch.Size((
                opt['vocab_size'] - opt['tune_partial'] - 2,
                opt['embedding_dim']
            ))
            self.register_buffer('fixed_embedding', torch.Tensor(buffer_size))
        # Projection for attention weighted question
        if opt['use_qemb']:
            if opt['add_char2word']:
                self.qemb_match = layers.SeqAttnMatch(opt['embedding_dim'] + opt['embedding_dim_TDNN'])
            else:
                self.qemb_match = layers.SeqAttnMatch(opt['embedding_dim'])
        # Input size to RNN: word emb + question emb + manual features
        if opt['add_char2word']:
            doc_input_size = opt['embedding_dim'] + opt['num_features'] + opt['embedding_dim_TDNN']
        else:
            doc_input_size = opt['embedding_dim'] + opt['num_features']
        if opt['use_qemb']:
            if opt['add_char2word']:
                doc_input_size += opt['embedding_dim'] + opt['embedding_dim_TDNN']
            else:
                doc_input_size += opt['embedding_dim']
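        # Worked example with hypothetical sizes: embedding_dim=300, num_features=4,
        # use_qemb=True and add_char2word=False gives
        # doc_input_size = 300 + 4 + 300 = 604.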
        #pdb.set_trace()
        # RNN document encoder
        self.doc_rnn = layers.StackedBRNN(
            input_size=doc_input_size,
            hidden_size=opt['hidden_size'],
            num_layers=opt['doc_layers'],
            dropout_rate=opt['dropout_rnn'],
            dropout_output=opt['dropout_rnn_output'],
            concat_layers=opt['concat_rnn_layers'],
            rnn_type=self.RNN_TYPES[opt['rnn_type']],
            padding=opt['rnn_padding'],
            )
        # RNN question encoder
        q_input_size = opt['embedding_dim']
        if opt['add_char2word']:
            q_input_size += opt['embedding_dim_TDNN']
        self.question_rnn = layers.StackedBRNN(
            input_size=q_input_size,
            hidden_size=opt['hidden_size'],
            num_layers=opt['question_layers'],
            dropout_rate=opt['dropout_rnn'],
            dropout_output=opt['dropout_rnn_output'],
            concat_layers=opt['concat_rnn_layers'],
            rnn_type=self.RNN_TYPES[opt['rnn_type']],
            padding=opt['rnn_padding'],
            )
        # Output sizes of rnn encoders
        doc_hidden_size = 2 * opt['hidden_size']
        question_hidden_size = 2 * opt['hidden_size']
        if opt['concat_rnn_layers']:
            doc_hidden_size *= opt['doc_layers']
            question_hidden_size *= opt['question_layers']
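        # Worked example with hypothetical sizes: the bidirectional encoders output
        # 2 * hidden_size features, so hidden_size=128, concat_rnn_layers=True and
        # doc_layers=3 give doc_hidden_size = 2 * 128 * 3 = 768.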
        # Question merging
        if opt['question_merge'] not in ['avg', 'self_attn']:
            raise NotImplementedError('question_merge = %s' % opt['question_merge'])
        if opt['question_merge'] == 'self_attn':
            self.self_attn = layers.LinearSeqAttn(question_hidden_size)
        # Q-P matching
        opt['qp_rnn_size'] = doc_hidden_size + question_hidden_size
        if opt['qp_bottleneck']:
            opt['qp_rnn_size'] = opt['hidden_size_bottleneck']
        
        self.qp_match = layers.GatedAttentionBilinearRNN(
            x_size=doc_hidden_size,
            y_size=question_hidden_size,
            hidden_size=opt['qp_rnn_size'],
            padding=opt['rnn_padding'],
            rnn_type=self.RNN_TYPES[opt['rnn_type']],
            birnn=opt['qp_birnn'],
            concat=opt['qp_concat'],
            gate=True
        )
        qp_matched_size = opt['qp_rnn_size']
        if opt['qp_birnn']:
            qp_matched_size = qp_matched_size * 2
        if opt['qp_concat']:
            qp_matched_size = qp_matched_size + doc_hidden_size        
 
        ## PP matching: 
        #pdb.set_trace()
             
        opt['pp_rnn_size'] = qp_matched_size * 2
        if opt['pp_bottleneck']:
            opt['pp_rnn_size'] = opt['hidden_size_bottleneck']
        
        self.pp_match = layers.GatedAttentionBilinearRNN(
            x_size=qp_matched_size,
            y_size=qp_matched_size,
            hidden_size=opt['pp_rnn_size'],
            padding=opt['rnn_padding'],
            rnn_type=self.RNN_TYPES[opt['rnn_type']],
            birnn=opt['pp_birnn'],
            concat=opt['pp_concat'],
            gate=opt['pp_gate'],
            rnn=opt['pp_rnn'],
            identity=opt['pp_identity']
        )
        pp_matched_size = opt['pp_rnn_size']
        if opt['pp_birnn'] and opt['pp_rnn']:
            pp_matched_size = pp_matched_size * 2
        if opt['pp_concat']:
            pp_matched_size = pp_matched_size + qp_matched_size
                
        # Bilinear attention for span start/end
        if opt['task_QA']:
            self.start_attn = layers.BilinearSeqAttn(
                pp_matched_size,
                question_hidden_size
                )
            self.end_attn = layers.BilinearSeqAttn(
                pp_matched_size,
                question_hidden_size
                )
                           
        # Paragraph Hierarchical Encoder
        if opt['ans_sent_predict'] :
            self.meanpoolLayer = layers.Selective_Meanpool(doc_hidden_size)
            self.sentBRNN = layers.StackedBRNN(
                input_size=pp_matched_size,
                hidden_size=opt['hidden_size_sent'],
                num_layers=opt['nLayer_Sent'],
                concat_layers=False,
                rnn_type=self.RNN_TYPES[opt['rnn_type']],
                padding=opt['rnn_padding_sent'],
            )
            self.sentseqAttn = layers.BilinearSeqAttn(
                opt['hidden_size_sent'],
                question_hidden_size,
                )
            #print('DEBUG (no hRNN)')
    #def forward(self, x1, x1_f, x1_mask, x2, x2_mask, x1_c, x1_c_mask, x2_c, x2_c_mask):
    #def forward(self, x1, x1_f, x1_mask, x2, x2_mask, x1_c=None, x2_c=None):  # for this version, we do not utilize mask for char
    def forward(self, x1, x1_f, x1_mask, x2, x2_mask, x1_c=None, x2_c=None, x1_sent_mask=None, word_boundary=None):  # for this version, we do not utilize mask for char
        #pdb.set_trace()
        """Inputs:
        x1 = document word indices             [batch * len_d]
        x1_f = document word features indices  [batch * len_d * nfeat]
        x1_mask = document padding mask        [batch * len_d] ==>
        x2 = question word indices             [batch * len_q]
        x2_mask = question padding mask        [batch * len_q] ==>
        x1_c = document char indices           [batch * len_d * max_char_per_word]
        x1_c_mask = document char padding mask [batch * len_d * max_char_per_word] --> not implemented in this version
        x2_c = question char indices           [batch * len_q * max_char_per_word]
        x2_c_mask = question char padding mask [batch * len_q * max_char_per_word] --> not implemented in this version
        """
        # Embed both document and question
        batch_size = x1.size()[0]
        doc_len = x1.size()[1]
        ques_len = x2.size()[1]
        x1_emb = self.embedding(x1) # N x Td x D
        x2_emb = self.embedding(x2) # N x Tq x D
        if self.opt['add_char2word']:
            max_wordL_d = x1_c.size()[2]
            max_wordL_q = x2_c.size()[2]
            x1_c = x1_c.view(-1, max_wordL_d)
            x2_c = x2_c.view(-1, max_wordL_q)
            x1_c_emb = self.char_embedding(x1_c)
            x2_c_emb = self.char_embedding(x2_c)
            x1_c_emb = x1_c_emb.view(batch_size,
                                     doc_len,
                                     max_wordL_d,
                                     -1)
            x2_c_emb = x2_c_emb.view(batch_size,
                                     ques_len,
                                     max_wordL_q,
                                     -1)
            # Produce char-aware word embed
            x1_cw_emb = self.TDNN(x1_c_emb)  # N x Td x sum(H)
            x2_cw_emb = self.TDNN(x2_c_emb)  # N x Tq x sum(H)
            # Merge word + char
            x1_emb = torch.cat((x1_emb, x1_cw_emb), 2)
            x2_emb = torch.cat((x2_emb, x2_cw_emb), 2)
            ###x1_mask = torch.cat([x1_mask, x1_c_mask], 2)  # For this version, we do not utilize char mask
            ###x2_mask = torch.cat([x2_mask, x2_c_mask], 2)  # For this version, we do not utilize char mask
            # Highway network
            if self.opt['nLayer_Highway'] > 0:
                [batch_size, seq_len, embed_size] = x1_emb.size()
                x1_emb = self.Highway(x1_emb.view(-1, embed_size))
                x1_emb = x1_emb.view(batch_size, -1, embed_size)
                [batch_size, seq_len, embed_size] = x2_emb.size()
                x2_emb = self.Highway(x2_emb.view(-1, embed_size))
                x2_emb = x2_emb.view(batch_size, -1, embed_size)
        else:
            if (x1_c is not None) and (x2_c is not None):
                #pdb.set_trace()
                x1_sent_mask = x1_c
                word_boundary = x2_c
        # Dropout on embeddings
        if self.opt['dropout_emb'] > 0:
            x1_emb = nn.functional.dropout(x1_emb, p=self.opt['dropout_emb'], training=self.training)
            x2_emb = nn.functional.dropout(x2_emb, p=self.opt['dropout_emb'], training=self.training)
        # Add attention-weighted question representation
        if self.opt['use_qemb']:
            x2_weighted_emb = self.qemb_match(x1_emb, x2_emb, x2_mask)
            drnn_input = torch.cat([x1_emb, x2_weighted_emb, x1_f], 2)
        else:
            drnn_input = torch.cat([x1_emb, x1_f], 2)
        # Encode document with RNN
        doc_hiddens = self.doc_rnn(drnn_input, x1_mask)
        #pdb.set_trace()
        # Encode question with RNN
        question_hiddens = self.question_rnn(x2_emb, x2_mask)
        
        # QP matching
        qp_matched_doc = self.qp_match(doc_hiddens, x1_mask, question_hiddens, x2_mask)
        
        # PP matching
        if not qp_matched_doc.is_contiguous():
            qp_matched_doc = qp_matched_doc.contiguous()
            
        pp_matched_doc = self.pp_match(qp_matched_doc, x1_mask, qp_matched_doc, x1_mask)
        #print(pp_matched_doc.size())
        #pdb.set_trace()
        
        # Merge question hiddens
        if self.opt['question_merge'] == 'avg':
            q_merge_weights = layers.uniform_weights(question_hiddens, x2_mask)
        elif self.opt['question_merge'] == 'self_attn':
            q_merge_weights = self.self_attn(question_hiddens, x2_mask)
        question_hidden = layers.weighted_avg(question_hiddens, q_merge_weights)
        return_list = []
        # Predict start and end positions
        if self.opt['task_QA']:
            start_scores = self.start_attn(pp_matched_doc, question_hidden, x1_mask)
            end_scores = self.end_attn(pp_matched_doc, question_hidden, x1_mask)
            return_list = return_list + [start_scores, end_scores]
        # Pooling , currently no multi-task learning
        if self.opt['ans_sent_predict']:
            sent_hiddens = self.meanpoolLayer(pp_matched_doc, word_boundary)
            if self.opt['nLayer_Sent'] > 0:
                sent_hiddens = self.sentBRNN(sent_hiddens, x1_sent_mask)
            sent_scores = self.sentseqAttn(sent_hiddens, question_hidden, x1_sent_mask)
            return_list = return_list + [sent_scores]
        return return_list
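# --- Hedged illustration (not part of the original model file above) ---
# The span prediction step relies on a bilinear sequence attention
# (layers.BilinearSeqAttn): each document position is scored against the merged
# question vector. Below is a minimal standalone sketch of that scoring step,
# assuming PyTorch; the class name and tensor sizes are hypothetical stand-ins.
import torch
import torch.nn as nn
import torch.nn.functional as F

class BilinearSeqAttnSketch(nn.Module):
    """Scores x_i^T W y for every position i of the document encoding."""
    def __init__(self, x_size, y_size):
        super(BilinearSeqAttnSketch, self).__init__()
        self.linear = nn.Linear(y_size, x_size)

    def forward(self, x, y, x_mask):
        # x: batch x len_d x x_size, y: batch x y_size, x_mask: batch x len_d (True = padding)
        Wy = self.linear(y)                           # batch x x_size
        scores = x.bmm(Wy.unsqueeze(2)).squeeze(2)    # batch x len_d
        scores = scores.masked_fill(x_mask, -float('inf'))
        return F.log_softmax(scores, dim=-1)

if __name__ == '__main__':
    doc = torch.randn(2, 7, 16)
    question = torch.randn(2, 8)
    pad_mask = torch.zeros(2, 7, dtype=torch.bool)
    print(BilinearSeqAttnSketch(16, 8)(doc, question, pad_mask).shape)  # torch.Size([2, 7])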
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from kern import Kern
from ...util.linalg import mdot
from ...util.decorators import silence_errors
from ...core.parameterization.param import Param
from ...core.parameterization.transformations import Logexp
class Periodic(Kern):
    def __init__(self, input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name):
        """
        :type input_dim: int
        :param variance: the variance of the Matern kernel
        :type variance: float
        :param lengthscale: the lengthscale of the Matern kernel
        :type lengthscale: np.ndarray of size (input_dim,)
        :param period: the period
        :type period: float
        :param n_freq: the number of frequencies considered for the periodic subspace
        :type n_freq: int
        :rtype: kernel object
        """
        assert input_dim==1, "Periodic kernels are only defined for input_dim=1"
        super(Periodic, self).__init__(input_dim, active_dims, name)
        self.input_dim = input_dim
        self.lower,self.upper = lower, upper
        self.n_freq = n_freq
        self.n_basis = 2*n_freq
        self.variance = Param('variance', np.float64(variance), Logexp())
        self.lengthscale = Param('lengthscale', np.float64(lengthscale), Logexp())
        self.period = Param('period', np.float64(period), Logexp())
        self.link_parameters(self.variance, self.lengthscale, self.period)
    def _cos(self, alpha, omega, phase):
        def f(x):
            return alpha*np.cos(omega*x + phase)
        return f
    @silence_errors
    def _cos_factorization(self, alpha, omega, phase):
        r1 = np.sum(alpha*np.cos(phase),axis=1)[:,None]
        r2 = np.sum(alpha*np.sin(phase),axis=1)[:,None]
        r =  np.sqrt(r1**2 + r2**2)
        psi = np.where(r1 != 0, (np.arctan(r2/r1) + (r1<0.)*np.pi),np.arcsin(r2))
        return r,omega[:,0:1], psi
    @silence_errors
    def _int_computation(self,r1,omega1,phi1,r2,omega2,phi2):
        Gint1 = 1./(omega1+omega2.T)*( np.sin((omega1+omega2.T)*self.upper+phi1+phi2.T) - np.sin((omega1+omega2.T)*self.lower+phi1+phi2.T)) + 1./(omega1-omega2.T)*( np.sin((omega1-omega2.T)*self.upper+phi1-phi2.T) - np.sin((omega1-omega2.T)*self.lower+phi1-phi2.T) )
        Gint2 = 1./(omega1+omega2.T)*( np.sin((omega1+omega2.T)*self.upper+phi1+phi2.T) - np.sin((omega1+omega2.T)*self.lower+phi1+phi2.T)) +  np.cos(phi1-phi2.T)*(self.upper-self.lower)
        Gint = np.dot(r1,r2.T)/2 * np.where(np.isnan(Gint1),Gint2,Gint1)
        return Gint
    def K(self, X, X2=None):
        FX = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)
        if X2 is None:
            FX2 = FX
        else:
            FX2 = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X2)
        return mdot(FX,self.Gi,FX2.T)
    def Kdiag(self,X):
        return np.diag(self.K(X))
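# --- Hedged illustration (not part of the GPy kernel file) ---
# _int_computation above evaluates, in closed form, Gram-matrix entries of the form
#   integral_lower^upper  r1*cos(w1*x + p1) * r2*cos(w2*x + p2) dx
# using the product-to-sum identity (with a special case when w1 == w2). The
# standalone sketch below checks the scalar, non-degenerate case against numerical
# quadrature; the function name is hypothetical.
import numpy as np

def cos_product_integral(r1, w1, p1, r2, w2, p2, lower, upper):
    def antiderivative(x):
        return 0.5 * r1 * r2 * (np.sin((w1 + w2) * x + p1 + p2) / (w1 + w2)
                                + np.sin((w1 - w2) * x + p1 - p2) / (w1 - w2))
    return antiderivative(upper) - antiderivative(lower)

if __name__ == '__main__':
    xs = np.linspace(0., 2 * np.pi, 200001)
    f = 1.3 * np.cos(2 * xs + 0.1) * 0.7 * np.cos(5 * xs - 0.4)
    print(np.trapz(f, xs))                                                   # numerical
    print(cos_product_integral(1.3, 2., 0.1, 0.7, 5., -0.4, 0., 2 * np.pi))  # closed form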
class PeriodicExponential(Periodic):
    """
    Kernel of the periodic subspace (up to a given frequency) of a exponential
    (Matern 1/2) RKHS.
    Only defined for input_dim=1.
    """
    def __init__(self, input_dim=1, variance=1., lengthscale=1., period=2.*np.pi, n_freq=10, lower=0., upper=4*np.pi, active_dims=None, name='periodic_exponential'):
        super(PeriodicExponential, self).__init__(input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name)
    def parameters_changed(self):
        self.a = [1./self.lengthscale, 1.]
        self.b = [1]
        self.basis_alpha = np.ones((self.n_basis,))
        self.basis_omega = (2*np.pi*np.arange(1,self.n_freq+1)/self.period).repeat(2)
        self.basis_phi =   np.zeros(self.n_freq * 2)
        self.basis_phi[::2] = -np.pi/2
        self.G = self.Gram_matrix()
        self.Gi = np.linalg.inv(self.G)
    def Gram_matrix(self):
        La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)),self.a[1]*self.basis_omega))
        Lo = np.column_stack((self.basis_omega,self.basis_omega))
        Lp = np.column_stack((self.basis_phi,self.basis_phi+np.pi/2))
        r,omega,phi =  self._cos_factorization(La,Lo,Lp)
        Gint = self._int_computation( r,omega,phi, r,omega,phi)
        Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]
        return(self.lengthscale/(2*self.variance) * Gint + 1./self.variance*np.dot(Flower,Flower.T))
    @silence_errors
    def update_gradients_full(self, dL_dK, X, X2=None):
        """derivative of the covariance matrix with respect to the parameters (shape is N x num_inducing x num_params)"""
        if X2 is None: X2 = X
        FX  = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)
        FX2 = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X2)
        La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)),self.a[1]*self.basis_omega))
        Lo = np.column_stack((self.basis_omega,self.basis_omega))
        Lp = np.column_stack((self.basis_phi,self.basis_phi+np.pi/2))
        r,omega,phi =  self._cos_factorization(La,Lo,Lp)
        Gint = self._int_computation( r,omega,phi, r,omega,phi)
        Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]
        #dK_dvar
        dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX2.T)
        #dK_dlen
        da_dlen = [-1./self.lengthscale**2,0.]
        dLa_dlen =  np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)),da_dlen[1]*self.basis_omega))
        r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)
        dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)
        dGint_dlen = dGint_dlen + dGint_dlen.T
        dG_dlen = 1./2*Gint + self.lengthscale/2*dGint_dlen
        dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX2.T)
        #dK_dper
        dFX_dper  = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)
        dFX2_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X2,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X2)
        dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period))
        dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi))
        r1,omega1,phi1 =  self._cos_factorization(dLa_dper,Lo,dLp_dper)
        IPPprim1 =  self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2)  +  1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))
        IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2)  +  1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))
        # SIMPLIFY!!!       IPPprim1 = (self.upper - self.lower)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2)  +  1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))
        IPPprim2 =  self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2)  + self.upper*np.cos(phi-phi1.T))
        IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2)  + self.lower*np.cos(phi-phi1.T))
        IPPprim = np.where(np.logical_or(np.isnan(IPPprim1), np.isinf(IPPprim1)), IPPprim2, IPPprim1)
        IPPint1 =  1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi)  +  1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)
        IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi)  +  1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)
        IPPint2 =  1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi)  + 1./2*self.upper**2*np.cos(phi-phi1.T)
        IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi)  + 1./2*self.lower**2*np.cos(phi-phi1.T)
        #IPPint2[0,0] = (self.upper**2 - self.lower**2)*np.cos(phi[0,0])*np.cos(phi1[0,0])
        IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)
        dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period))
        dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2))
        r2,omega2,phi2 = dLa_dper2.T,Lo[:,0:1],dLp_dper2.T
        dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) + self._int_computation(r2,omega2,phi2, r,omega,phi)
        dGint_dper = dGint_dper + dGint_dper.T
        dFlower_dper  = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]
        dG_dper = 1./self.variance*(self.lengthscale/2*dGint_dper + self.b[0]*(np.dot(dFlower_dper,Flower.T)+np.dot(Flower,dFlower_dper.T)))
        dK_dper = mdot(dFX_dper,self.Gi,FX2.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX2.T) + mdot(FX,self.Gi,dFX2_dper.T)
        self.variance.gradient = np.sum(dK_dvar*dL_dK)
        self.lengthscale.gradient = np.sum(dK_dlen*dL_dK)
        self.period.gradient = np.sum(dK_dper*dL_dK)
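# --- Hedged illustration (not part of the GPy classes above) ---
# parameters_changed above builds a Fourier feature basis: each of the n_freq
# frequencies contributes a cosine/sine pair, both encoded as cos(w*x + phi) with
# phi alternating between -pi/2 (giving sin) and 0 (giving cos). A minimal
# standalone reconstruction of that construction, with hypothetical values:
import numpy as np

n_freq, period = 3, 2 * np.pi
basis_omega = (2 * np.pi * np.arange(1, n_freq + 1) / period).repeat(2)
basis_phi = np.zeros(2 * n_freq)
basis_phi[::2] = -np.pi / 2                     # cos(w*x - pi/2) == sin(w*x)
x = 0.7
features = np.cos(basis_omega * x + basis_phi)
print(np.allclose(features[::2], np.sin(basis_omega[::2] * x)))    # True: sine features
print(np.allclose(features[1::2], np.cos(basis_omega[1::2] * x)))  # True: cosine features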
class PeriodicMatern32(Periodic):
    """
    Kernel of the periodic subspace (up to a given frequency) of a Matern 3/2 RKHS. Only defined for input_dim=1.
    :param input_dim: the number of input dimensions
    :type input_dim: int
    :param variance: the variance of the Matern kernel
    :type variance: float
    :param lengthscale: the lengthscale of the Matern kernel
    :type lengthscale: np.ndarray of size (input_dim,)
    :param period: the period
    :type period: float
    :param n_freq: the number of frequencies considered for the periodic subspace
    :type n_freq: int
    :rtype: kernel object
    """
    def __init__(self, input_dim=1, variance=1., lengthscale=1., period=2.*np.pi, n_freq=10, lower=0., upper=4*np.pi, active_dims=None, name='periodic_Matern32'):
        super(PeriodicMatern32, self).__init__(input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name)
    def parameters_changed(self):
        self.a = [3./self.lengthscale**2, 2*np.sqrt(3)/self.lengthscale, 1.]
        self.b = [1,self.lengthscale**2/3]
        self.basis_alpha = np.ones((self.n_basis,))
        self.basis_omega = (2*np.pi*np.arange(1,self.n_freq+1)/self.period).repeat(2)
        self.basis_phi =   np.zeros(self.n_freq * 2)
        self.basis_phi[::2] = -np.pi/2
        self.G = self.Gram_matrix()
        self.Gi = np.linalg.inv(self.G)
    def Gram_matrix(self):
        La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)),self.a[1]*self.basis_omega,self.a[2]*self.basis_omega**2))
        Lo = np.column_stack((self.basis_omega,self.basis_omega,self.basis_omega))
        Lp = np.column_stack((self.basis_phi,self.basis_phi+np.pi/2,self.basis_phi+np.pi))
        r,omega,phi =  self._cos_factorization(La,Lo,Lp)
        Gint = self._int_computation( r,omega,phi, r,omega,phi)
        Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]
        F1lower = np.array(self._cos(self.basis_alpha*self.basis_omega,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]
        return(self.lengthscale**3/(12*np.sqrt(3)*self.variance) * Gint + 1./self.variance*np.dot(Flower,Flower.T) + self.lengthscale**2/(3.*self.variance)*np.dot(F1lower,F1lower.T))
    @silence_errors
    def update_gradients_full(self,dL_dK,X,X2):
        """derivative of the covariance matrix with respect to the parameters (shape is num_data x num_inducing x num_params)"""
        if X2 is None: X2 = X
        FX  = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)
        FX2 = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X2)
        La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)),self.a[1]*self.basis_omega,self.a[2]*self.basis_omega**2))
        Lo = np.column_stack((self.basis_omega,self.basis_omega,self.basis_omega))
        Lp = np.column_stack((self.basis_phi,self.basis_phi+np.pi/2,self.basis_phi+np.pi))
        r,omega,phi =  self._cos_factorization(La,Lo,Lp)
        Gint = self._int_computation( r,omega,phi, r,omega,phi)
        Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]
        F1lower = np.array(self._cos(self.basis_alpha*self.basis_omega,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]
        #dK_dvar
        dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX2.T)
        #dK_dlen
        da_dlen = [-6/self.lengthscale**3,-2*np.sqrt(3)/self.lengthscale**2,0.]
        db_dlen = [0.,2*self.lengthscale/3.]
        dLa_dlen =  np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)),da_dlen[1]*self.basis_omega,da_dlen[2]*self.basis_omega**2))
        r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)
        dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)
        dGint_dlen = dGint_dlen + dGint_dlen.T
        dG_dlen = self.lengthscale**2/(4*np.sqrt(3))*Gint + self.lengthscale**3/(12*np.sqrt(3))*dGint_dlen + db_dlen[0]*np.dot(Flower,Flower.T) + db_dlen[1]*np.dot(F1lower,F1lower.T)
        dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX2.T)
        #dK_dper
        dFX_dper  = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)
        dFX2_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X2,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X2)
        dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period, -self.a[2]*self.basis_omega**3/self.period))
        dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi,self.basis_phi+np.pi*3/2))
        r1,omega1,phi1 =  self._cos_factorization(dLa_dper,Lo,dLp_dper)
        IPPprim1 =  self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2)  +  1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))
        IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2)  +  1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))
        IPPprim2 =  self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2)  + self.upper*np.cos(phi-phi1.T))
        IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2)  + self.lower*np.cos(phi-phi1.T))
        IPPprim = np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)
        IPPint1 =  1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi)  +  1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)
        IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi)  +  1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)
        IPPint2 =  1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi)  + 1./2*self.upper**2*np.cos(phi-phi1.T)
        IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi)  + 1./2*self.lower**2*np.cos(phi-phi1.T)
        IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)
        dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period, -2*self.a[2]*self.basis_omega**2/self.period))
        dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi))
        r2,omega2,phi2 =  self._cos_factorization(dLa_dper2,Lo[:,0:2],dLp_dper2)
        dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) +  self._int_computation(r2,omega2,phi2, r,omega,phi)
        dGint_dper = dGint_dper + dGint_dper.T
        dFlower_dper  = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]
        dF1lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower)+self._cos(-self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]
        dG_dper = 1./self.variance*(self.lengthscale**3/(12*np.sqrt(3))*dGint_dper + self.b[0]*(np.dot(dFlower_dper,Flower.T)+np.dot(Flower,dFlower_dper.T)) + self.b[1]*(np.dot(dF1lower_dper,F1lower.T)+np.dot(F1lower,dF1lower_dper.T)))
        dK_dper = mdot(dFX_dper,self.Gi,FX2.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX2.T) + mdot(FX,self.Gi,dFX2_dper.T)
        self.variance.gradient = np.sum(dK_dvar*dL_dK)
        self.lengthscale.gradient = np.sum(dK_dlen*dL_dK)
        self.period.gradient = np.sum(dK_dper*dL_dK)
class PeriodicMatern52(Periodic):
    """
    Kernel of the periodic subspace (up to a given frequency) of a Matern 5/2 RKHS. Only defined for input_dim=1.
    :param input_dim: the number of input dimensions
    :type input_dim: int
    :param variance: the variance of the Matern kernel
    :type variance: float
    :param lengthscale: the lengthscale of the Matern kernel
    :type lengthscale: np.ndarray of size (input_dim,)
    :param period: the period
    :type period: float
    :param n_freq: the number of frequencies considered for the periodic subspace
    :type n_freq: int
    :rtype: kernel object
    """
    def __init__(self, input_dim=1, variance=1., lengthscale=1., period=2.*np.pi, n_freq=10, lower=0., upper=4*np.pi, active_dims=None, name='periodic_Matern52'):
        super(PeriodicMatern52, self).__init__(input_dim, variance, lengthscale, period, n_freq, lower, upper, active_dims, name)
    def parameters_changed(self):
        self.a = [5*np.sqrt(5)/self.lengthscale**3, 15./self.lengthscale**2,3*np.sqrt(5)/self.lengthscale, 1.]
        self.b  = [9./8, 9*self.lengthscale**4/200., 3*self.lengthscale**2/5., 3*self.lengthscale**2/(5*8.), 3*self.lengthscale**2/(5*8.)]
        self.basis_alpha = np.ones((2*self.n_freq,))
        self.basis_omega = (2*np.pi*np.arange(1,self.n_freq+1)/self.period).repeat(2)
        self.basis_phi =   np.zeros(self.n_freq * 2)
        self.basis_phi[::2] = -np.pi/2
        self.G = self.Gram_matrix()
        self.Gi = np.linalg.inv(self.G)
    def Gram_matrix(self):
        La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)), self.a[1]*self.basis_omega, self.a[2]*self.basis_omega**2, self.a[3]*self.basis_omega**3))
        Lo = np.column_stack((self.basis_omega, self.basis_omega, self.basis_omega, self.basis_omega))
        Lp = np.column_stack((self.basis_phi, self.basis_phi+np.pi/2, self.basis_phi+np.pi, self.basis_phi+np.pi*3/2))
        r,omega,phi =  self._cos_factorization(La,Lo,Lp)
        Gint = self._int_computation( r,omega,phi, r,omega,phi)
        Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]
        F1lower = np.array(self._cos(self.basis_alpha*self.basis_omega,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]
        F2lower = np.array(self._cos(self.basis_alpha*self.basis_omega**2,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None]
        lower_terms = self.b[0]*np.dot(Flower,Flower.T) + self.b[1]*np.dot(F2lower,F2lower.T) + self.b[2]*np.dot(F1lower,F1lower.T) + self.b[3]*np.dot(F2lower,Flower.T) + self.b[4]*np.dot(Flower,F2lower.T)
        return(3*self.lengthscale**5/(400*np.sqrt(5)*self.variance) * Gint + 1./self.variance*lower_terms)
    @silence_errors
    def update_gradients_full(self, dL_dK, X, X2=None):
        if X2 is None: X2 = X
        FX  = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X)
        FX2 = self._cos(self.basis_alpha[None,:],self.basis_omega[None,:],self.basis_phi[None,:])(X2)
        La = np.column_stack((self.a[0]*np.ones((self.n_basis,1)), self.a[1]*self.basis_omega, self.a[2]*self.basis_omega**2, self.a[3]*self.basis_omega**3))
        Lo = np.column_stack((self.basis_omega, self.basis_omega, self.basis_omega, self.basis_omega))
        Lp = np.column_stack((self.basis_phi, self.basis_phi+np.pi/2, self.basis_phi+np.pi, self.basis_phi+np.pi*3/2))
        r,omega,phi =  self._cos_factorization(La,Lo,Lp)
        Gint = self._int_computation( r,omega,phi, r,omega,phi)
        Flower = np.array(self._cos(self.basis_alpha,self.basis_omega,self.basis_phi)(self.lower))[:,None]
        F1lower = np.array(self._cos(self.basis_alpha*self.basis_omega,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]
        F2lower = np.array(self._cos(self.basis_alpha*self.basis_omega**2,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None]
        #dK_dvar
        dK_dvar = 1./self.variance*mdot(FX,self.Gi,FX2.T)
        #dK_dlen
        da_dlen = [-3*self.a[0]/self.lengthscale, -2*self.a[1]/self.lengthscale, -self.a[2]/self.lengthscale, 0.]
        db_dlen = [0., 4*self.b[1]/self.lengthscale, 2*self.b[2]/self.lengthscale, 2*self.b[3]/self.lengthscale, 2*self.b[4]/self.lengthscale]
        dLa_dlen =  np.column_stack((da_dlen[0]*np.ones((self.n_basis,1)), da_dlen[1]*self.basis_omega, da_dlen[2]*self.basis_omega**2, da_dlen[3]*self.basis_omega**3))
        r1,omega1,phi1 = self._cos_factorization(dLa_dlen,Lo,Lp)
        dGint_dlen = self._int_computation(r1,omega1,phi1, r,omega,phi)
        dGint_dlen = dGint_dlen + dGint_dlen.T
        dlower_terms_dlen = db_dlen[0]*np.dot(Flower,Flower.T) + db_dlen[1]*np.dot(F2lower,F2lower.T) + db_dlen[2]*np.dot(F1lower,F1lower.T) + db_dlen[3]*np.dot(F2lower,Flower.T) + db_dlen[4]*np.dot(Flower,F2lower.T)
        dG_dlen = 15*self.lengthscale**4/(400*np.sqrt(5))*Gint + 3*self.lengthscale**5/(400*np.sqrt(5))*dGint_dlen + dlower_terms_dlen
        dK_dlen = -mdot(FX,self.Gi,dG_dlen/self.variance,self.Gi,FX2.T)
        #dK_dper
        dFX_dper  = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X ,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X)
        dFX2_dper = self._cos(-self.basis_alpha[None,:]*self.basis_omega[None,:]/self.period*X2,self.basis_omega[None,:],self.basis_phi[None,:]+np.pi/2)(X2)
        dLa_dper = np.column_stack((-self.a[0]*self.basis_omega/self.period, -self.a[1]*self.basis_omega**2/self.period, -self.a[2]*self.basis_omega**3/self.period, -self.a[3]*self.basis_omega**4/self.period))
        dLp_dper = np.column_stack((self.basis_phi+np.pi/2,self.basis_phi+np.pi,self.basis_phi+np.pi*3/2,self.basis_phi))
        r1,omega1,phi1 =  self._cos_factorization(dLa_dper,Lo,dLp_dper)
        IPPprim1 =  self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2)  +  1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi/2))
        IPPprim1 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2)  +  1./(omega-omega1.T)*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi/2))
        IPPprim2 =  self.upper*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi/2)  + self.upper*np.cos(phi-phi1.T))
        IPPprim2 -= self.lower*(1./(omega+omega1.T)*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi/2)  + self.lower*np.cos(phi-phi1.T))
        IPPprim = np.where(np.isnan(IPPprim1),IPPprim2,IPPprim1)
        IPPint1 =  1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi)  +  1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.upper+phi-phi1.T-np.pi)
        IPPint1 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi)  +  1./(omega-omega1.T)**2*np.cos((omega-omega1.T)*self.lower+phi-phi1.T-np.pi)
        IPPint2 =  1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.upper+phi+phi1.T-np.pi)  + 1./2*self.upper**2*np.cos(phi-phi1.T)
        IPPint2 -= 1./(omega+omega1.T)**2*np.cos((omega+omega1.T)*self.lower+phi+phi1.T-np.pi)  + 1./2*self.lower**2*np.cos(phi-phi1.T)
        IPPint = np.where(np.isnan(IPPint1),IPPint2,IPPint1)
        dLa_dper2 = np.column_stack((-self.a[1]*self.basis_omega/self.period, -2*self.a[2]*self.basis_omega**2/self.period, -3*self.a[3]*self.basis_omega**3/self.period))
        dLp_dper2 = np.column_stack((self.basis_phi+np.pi/2, self.basis_phi+np.pi, self.basis_phi+np.pi*3/2))
        r2,omega2,phi2 =  self._cos_factorization(dLa_dper2,Lo[:,0:2],dLp_dper2)
        dGint_dper = np.dot(r,r1.T)/2 * (IPPprim - IPPint) +  self._int_computation(r2,omega2,phi2, r,omega,phi)
        dGint_dper = dGint_dper + dGint_dper.T
        dFlower_dper  = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]
        dF1lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower)+self._cos(-self.basis_alpha*self.basis_omega/self.period,self.basis_omega,self.basis_phi+np.pi/2)(self.lower))[:,None]
        dF2lower_dper = np.array(self._cos(-self.lower*self.basis_alpha*self.basis_omega**3/self.period,self.basis_omega,self.basis_phi+np.pi*3/2)(self.lower) + self._cos(-2*self.basis_alpha*self.basis_omega**2/self.period,self.basis_omega,self.basis_phi+np.pi)(self.lower))[:,None]
        dlower_terms_dper  = self.b[0] * (np.dot(dFlower_dper,Flower.T) + np.dot(Flower,dFlower_dper.T))
        dlower_terms_dper += self.b[1] * (np.dot(dF2lower_dper,F2lower.T) + np.dot(F2lower,dF2lower_dper.T)) - 4*self.b[1]/self.period*np.dot(F2lower,F2lower.T)
        dlower_terms_dper += self.b[2] * (np.dot(dF1lower_dper,F1lower.T) + np.dot(F1lower,dF1lower_dper.T)) - 2*self.b[2]/self.period*np.dot(F1lower,F1lower.T)
        dlower_terms_dper += self.b[3] * (np.dot(dF2lower_dper,Flower.T) + np.dot(F2lower,dFlower_dper.T)) - 2*self.b[3]/self.period*np.dot(F2lower,Flower.T)
        dlower_terms_dper += self.b[4] * (np.dot(dFlower_dper,F2lower.T) + np.dot(Flower,dF2lower_dper.T)) - 2*self.b[4]/self.period*np.dot(Flower,F2lower.T)
        dG_dper = 1./self.variance*(3*self.lengthscale**5/(400*np.sqrt(5))*dGint_dper + 0.5*dlower_terms_dper)
        dK_dper = mdot(dFX_dper,self.Gi,FX2.T) - mdot(FX,self.Gi,dG_dper,self.Gi,FX2.T) + mdot(FX,self.Gi,dFX2_dper.T)
        self.variance.gradient = np.sum(dK_dvar*dL_dK)
        self.lengthscale.gradient = np.sum(dK_dlen*dL_dK)
        self.period.gradient = np.sum(dK_dper*dL_dK)
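# --- Hedged usage sketch (assumes a GPy installation; not part of the file above) ---
# Rough illustration of how these kernels are typically used through GPy's public
# API: build a 1-D periodic Matern kernel and evaluate its covariance matrix.
import numpy as np
import GPy

X = np.linspace(0., 4 * np.pi, 50)[:, None]
k = GPy.kern.PeriodicMatern32(input_dim=1, variance=1., lengthscale=1.,
                              period=2 * np.pi, n_freq=10)
K = k.K(X)                                  # 50 x 50 covariance matrix
print(K.shape, np.allclose(K, K.T))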
import sys
import os
import struct
import binascii
import math
from PyQt4 import QtCore, QtGui
from time import strftime, localtime
# from struct import pack
# from binascii import hexlify, unhexlify
# from math import atan, sin, cos
from numpy import argmax, isnan
from gui import Ui_MainWindow
from aux_fcns import *
from settings import *
class MyApp(QtGui.QMainWindow):
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Start of user interaction code
        self.ui.pb_run.clicked.connect(self.openrundir)
        self.ui.pb_map.clicked.connect(self.choosemappath)
        self.ui.pb_alignsave.clicked.connect(self.saveoutput)
        self.ui.pb_preview.clicked.connect(self.calcoutput)
        self.ui.le_atskip.setReadOnly(True)
        # Message buffer
        self.msglist = []
    def logmsg(self, msg):
        """Helper method for adding timestamped prefix to log messages."""
        tsmsg = "[" + strftime("%H:%M", localtime()) + "] " + msg
        self.msglist.append(tsmsg)
    def printlog(self):
        """Prints log messages to GUI text browser."""
        self.msglist.append("\n")
        self.ui.br_outputlog.setText("\n".join(self.msglist))
    def openrundir(self):
        """Check for valid serial number in directory name or parent directory name
        in case 'run' folder is specified. If valid (checksum), read platemap from
        database storage and get map ID. Otherwise, manually select map file."""
        self.ui.le_run.setText(
            QtGui.QFileDialog.getExistingDirectory(self, "Select Run Directory", rundir)
        )
        self.rundir = str(self.ui.le_run.text())
        if any(
            [
                runstatus in self.rundir
                for runstatus in [".done", ".run", ".invalid", ".copied"]
            ]
        ):
            basedir = os.path.basename(os.path.dirname(self.rundir))
        else:
            basedir = os.path.basename(self.rundir)
        try:
            serial = basedir.replace("-", "_").split("_")[-1]
            plateid = serial[:-1]
            checksum = serial[-1]
            float(serial)
        except:
            self.logmsg("Serial number not found in folder path.")
            return
        if sum(int(i) for i in plateid) % 10 == int(checksum):
            plateidpath = os.path.join(platedir, plateid)
            platemaps = [fn for fn in os.listdir(plateidpath) if fn.endswith(".map")]
            if len(platemaps) > 1:
                timestamps = [
                    float("".join([a for a in pm.split("-")[2] if a in "0123456789"]))
                    for pm in platemaps
                ]
                self.logmsg(
                    "Multiple prints found, loading most recent print map unless specified."
                )
                self.ui.le_map.setText(
                    os.path.join(plateidpath, platemaps[argmax(timestamps)])
                )
            else:
                self.logmsg("Print map found.")
                self.ui.le_map.setText(os.path.join(plateidpath, platemaps[0]))
            infod = readrcp(
                os.path.join(plateidpath, os.path.basename(plateidpath) + ".info")
            )
            lastmap = max([int(s.split("__")[-1]) for s in infod["prints"].keys()])
            self.openmaptxt(usemap=infod["prints"]["prints__" + str(lastmap)]["map_id"])
        else:
            self.logmsg("Bad checksum for serial number in folder path.")
    def choosemappath(self):
        """Manual plate map selection."""
        self.ui.le_map.setText(
            QtGui.QFileDialog.getOpenFileName(
                self, "Select Platemap File", mapdir, "Platemap files (*.txt)"
            )
        )
        self.openmaptxt()
    def openmaptxt(self, usemap=0):
        """Read plate map text file and populate filter fields in GUI. Work around for
        not having to deal with empty fields."""
        mapdir = str(self.ui.le_map.text())
        self.mapdlist = readsingleplatemaptxt(mapdir)
        if usemap:
            self.map_id = str(usemap)
        else:
            self.map_id = str(int(os.path.basename(mapdir).split("-")[0]))
        self.ui.le_xmin.setText(str(min([d["x"] for d in self.mapdlist])))
        self.ui.le_xmax.setText(str(max([d["x"] for d in self.mapdlist])))
        self.ui.le_ymin.setText(str(min([d["y"] for d in self.mapdlist])))
        self.ui.le_ymax.setText(str(max([d["y"] for d in self.mapdlist])))
        self.ui.le_sampleskip.setText(str(0))
        self.ui.le_colskip.setText(str(0))
        self.ui.le_rowskip.setText(str(0))
        self.ui.le_atskip.setText(str(1))
        self.ui.le_samplemin.setText(str(min([d["Sample"] for d in self.mapdlist])))
        self.ui.le_samplemax.setText(str(max([d["Sample"] for d in self.mapdlist])))
    def calcoutput(self):
        """Read stage inputs for alignment, apply sample filters, then calculate aligned positions."""
        self.getguiparams()
        self.applyfilter()
        self.applyskip()
        self.applymaplim()
        self.applysamplelim()
        self.alignmap()
        filteroutput = "Mapped " + str(self.counter) + " locations."
        alignoutput1 = "rot = " + str(self.rot) + ", y-offset = " + str(self.yoff)
        alignoutput2 = "x-skew = " + str(self.skx) + ", y-skew = " + str(self.sky)
        self.logmsg(filteroutput)
        self.logmsg(alignoutput1)
        self.logmsg(alignoutput2)
        self.printlog()
    def saveoutput(self):
        """Write files this time."""
        self.calcoutput()
        self.writefiles()
    def writefiles(self):
        """Write map-aligned '.STG' file using current datetime as filename. Include
        'sample_no.txt' containing list of sample numbers and save both files to
        run directory selected in GUI."""
        self.genbytecode()
        try:
            fn = (
                strftime("%Y%m%d.%H%M%S", localtime())
                + "_map"
                + str(self.map_id)
                + "_pts"
                + str(self.counter)
                + ".stg"
            )
            p = os.path.join(self.rundir, fn)
            fo = open(p, mode="wb")
            fo.write(self.bytecode)
            fo.close()
            self.logmsg("Wrote " + fn + " to run directory.")
        except:
            self.logmsg("Error writing " + fn + " to run directory.")
        try:
            ps = os.path.join(self.rundir, "sample_no.txt")
            fs = open(ps, mode="w")
            fs.write("\n".join(self.samplelist))
            fs.close()
            self.logmsg("Wrote sample_no.txt to run directory.")
        except:
            self.logmsg("Error writing sample_no.txt to run directory.")
    def getguiparams(self):
        """Read GUI fields into object variables."""
        self.paramd = {}
        self.aligntoprint = self.ui.tb_align.currentIndex() == 0
        for linetxt in self.findChildren(QtGui.QLineEdit):
            try:
                self.paramd[str(linetxt.objectName())[3:]] = float(linetxt.text())
            except ValueError:
                self.paramd[str(linetxt.objectName())[3:]] = str(linetxt.text())
        snums = ["sample_a", "sample_b", "sample_c"]
        xkeys = ["stagx_a", "stagx_b", "stagx_c"]
        ykeys = ["stagy_a", "stagy_b", "stagy_c"]
        self.staged = {}
        for i in range(len(snums)):
            try:
                self.staged[snums[i]] = int(self.ui.tw_stage.item(i, 0).text())
            except AttributeError:
                self.logmsg("Sample field is empty. Ignore and assume wafer alignment.")
                self.ui.tb_align.setCurrentIndex(1)
                break
            except ValueError:
                self.logmsg("Invalid sample number.")
                self.ui.tb_align.setCurrentIndex(1)
                break
        for i in range(len(xkeys)):
            try:
                self.staged[xkeys[i]] = float(self.ui.tw_stage.item(i, 1).text())
            except ValueError:
                self.logmsg("Invalid sample x-coord in stage table.")
        for i in range(len(ykeys)):
            try:
                self.staged[ykeys[i]] = float(self.ui.tw_stage.item(i, 2).text())
            except ValueError:
                self.logmsg("Invalid sample y-coord in stage table.")
        self.rotonly = self.ui.cb_rotonly.isChecked()
    def applyfilter(self):
        """ """
        if type(self.parad["keepcode"]) == str:
            codelist = [
                int(code)
                for code in self.paramd["keepcode"]
                .replace(",", " ")
                .replace("\t", " ")
                .split()
            ]
        else:
            codelist = [int(code) for code in self.paramd["keepcode"]]
        chanlist = [
            chan
            for chan in self.paramd["omitch"]
            .replace(",", " ")
            .replace("\t", " ")
            .split()
        ]
        if not any([codelist, chanlist]):
            self.filterdlist = self.mapdlist
        elif codelist and not chanlist:
            self.filterdlist = [d for d in self.mapdlist if d["code"] in codelist]
        elif chanlist and not codelist:
            self.filterdlist = [
                d for d in self.mapdlist if all([d[chan] == 0 for chan in chanlist])
            ]
        else:
            self.filterdlist = [
                d
                for d in self.mapdlist
                if (
                    (d["code"] in codelist) and all([d[chan] == 0 for chan in chanlist])
                )
            ]
    def applyskip(self):
        """ """
        xs = [d["x"] for d in self.mapdlist]
        ys = [d["y"] for d in self.mapdlist]
        setx = list(set(xs))
        setx.sort()
        sety = list(set(ys))
        sety.sort(reverse=True)
        getcol = lambda xval: [i for i in range(len(setx)) if setx[i] == xval][0]
        getrow = lambda yval: [i for i in range(len(sety)) if sety[i] == yval][0]
        for key in ["sampleskip", "colskip", "rowskip"]:
            if self.paramd[key] == "":
                self.paramd[key] = 0
        if self.paramd["atskip"] == "":
            self.paramd["atskip"] = 0.01
        myint = lambda x: 1 if isnan(x) else int(x)
        self.filterdlist = [
            d
            for d in self.filterdlist
            if (
                d["Sample"] % (self.paramd["sampleskip"] + 1) == 0
                and getcol(d["x"]) % (self.paramd["colskip"] + 1) == 0
                and getrow(d["y"]) % (self.paramd["rowskip"] + 1) == 0
            )
        ]  # and \
        # all([myint(d[chan]*100) % int(self.paramd['atskip']*100)==0 for chan in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']]))]
    def applymaplim(self):
        """ """
        self.filterdlist = [
            d
            for d in self.filterdlist
            if (
                d["x"] >= self.paramd["xmin"]
                and d["x"] <= self.paramd["xmax"]
                and d["y"] >= self.paramd["ymin"]
                and d["y"] <= self.paramd["ymax"]
            )
        ]
    def applysamplelim(self):
        """ """
        slist = [
            int(s)
            for s in str(self.ui.te_samplelist.toPlainText())
            .replace(",", " ")
            .replace("\t", " ")
            .replace("\n", " ")
            .split()
        ]
        allsamples = [d["Sample"] for d in self.mapdlist]
        try:
            smin = int(self.paramd["samplemin"])
        except:
            smin = 0
        try:
            smax = int(self.paramd["samplemax"])
        except:
            smax = max(allsamples)
        if len(slist) > 0:
            self.filterdlist = [d for d in self.filterdlist if d["Sample"] in slist]
        self.filterdlist = [
            d for d in self.filterdlist if (d["Sample"] >= smin and d["Sample"] <= smax)
        ]
    def alignmap(self):
        """ """
        if self.aligntoprint:  # align map to print
            self.yoff = 0
            slist = [d["Sample"] for d in self.mapdlist]
            aind = slist.index(self.staged["sample_a"])
            bind = slist.index(self.staged["sample_b"])
            cind = slist.index(self.staged["sample_c"])
            pax = self.mapdlist[aind]["x"]
            pay = self.mapdlist[aind]["y"]
            pbx = self.mapdlist[bind]["x"]
            pby = self.mapdlist[bind]["y"]
            pcx = self.mapdlist[cind]["x"]
            pcy = self.mapdlist[cind]["y"]
            pbax = pax - pbx
            pbay = pay - pby
            pba = (pbax ** 2 + pbay ** 2) ** 0.5
            pbcx = pcx - pbx
            pbcy = pcy - pby
            pbc = (pbcx ** 2 + pbcy ** 2) ** 0.5
            # Orbis x & y diffs (origin sample A)
            sbax = self.staged["stagx_b"] - self.staged["stagx_a"]
            sbay = self.staged["stagy_a"] - self.staged["stagy_b"]
            sba = (sbax ** 2 + sbay ** 2) ** 0.5
            sbcx = self.staged["stagx_b"] - self.staged["stagx_c"]
            sbcy = self.staged["stagy_c"] - self.staged["stagy_b"]
            sbc = (sbcx ** 2 + sbcy ** 2) ** 0.5
            self.rot = math.atan(
                sbcy / sbcx
            )  # epson printer has non-linear elongation in y, use x instead
            if self.rotonly:
                self.skx = 1
                self.sky = 1
            else:
                self.skx = sbc / pbc
                self.sky = sba / pba
        else:  # align map to wafer
            sbcx = self.staged["stagx_b"] - self.staged["stagx_c"]
            sbcy = self.staged["stagy_c"] - self.staged["stagy_b"]
            # y-position of wafer diameter || to flat (y=0)
            hh = 47.3
            # full wafer diameter
            hw = 50.0
            # Si wafer width/2
            self.rot = math.atan(sbcy / sbcx)
            self.yoff = self.staged["stagy_a"] - hh
            self.skx = 1
            self.sky = 1
        self.counter = 0
        self.index = ""
        self.positions = ""
        self.samplelist = []
        for d in self.filterdlist:
            if self.aligntoprint:
                # offset to map point B before stretch
                xn = d["x"] - pbx
                yn = d["y"] - pby
                # apply stretch
                xsk = xn * self.skx
                ysk = yn * self.sky
                # rotate around map point B
                xr = xsk * math.cos(self.rot) - ysk * math.sin(self.rot)
                yr = xsk * math.sin(self.rot) + ysk * math.cos(self.rot)
                xstg = self.staged["stagx_b"] - xr
                ystg = self.staged["stagy_b"] + yr
            else:
                xn = d["x"] - hw
                yn = d["y"] - hh
                xr = xn * math.cos(self.rot) - yn * math.sin(self.rot) + hw
                yr = xn * math.sin(self.rot) + yn * math.cos(self.rot) + hh
                # offset pm by point A coord
                xstg = self.staged["stagx_a"] - xr
                ystg = self.yoff + yr
            checkx = stagx_min <= xstg <= stagx_max
            checky = stagy_min <= ystg <= stagy_max
            if checkx and checky:
                self.counter += 1
                i = struct.pack(
                    "<h", self.counter
                )  # entry index (16-bit short?, 2 bytes), don't use sample number in case we remove out-of-range samples
                i += struct.pack(
                    "<h", 0
                )  # entry type (00 for point, 01 for line, 02 for matrix)
                i += (
                    struct.pack("<h", 1) * 2
                )  # (1) num points to scan (01 for point, for line length, matrix width)
                # (2) num points to scan (01 for point & line, matrix height)
                l = str(int(d["Sample"])) + " " * (
                    16 - len(str(int(d["Sample"])))
                )  # use sample number for stage label, max 16 characters
                i = binascii.hexlify("Center  " + l) + i.encode("hex")
                x = struct.pack("<f", xstg)
                y = struct.pack("<f", ystg)
                z = struct.pack("<f", self.paramd["stagz"])
                p = (
                    x + y + x + y + z + z
                )  # x start, y start, x end, y end, z start, z end
                p += (
                    struct.pack("x") * 4
                )  # 4 byte padding (probably for rotation info but our stage doesn't have that)
                p = p.encode("hex")
                self.index += i
                self.positions += p
                self.samplelist += [str(d["Sample"])]
    def genbytecode(self):
        """ """
        # assemble long strings of bytes (as hex) then unhexlify and write to binary file
        separator = (
            "0000DD24664052B8884298AEC04285EBB140BE9F3A40486186422F1DC242C3F590400000"
        )
        separator = separator + (struct.pack("x") * 60).encode("hex")
        # form header string, need to know # of in-range samples so this comes after for loop
        header = struct.pack("<b", 15)
        header += struct.pack("x") * 15
        header += struct.pack("x") * 2
        header += struct.pack("<h", self.counter)
        header += struct.pack("x") * 20
        header = header.encode("hex")
        # concatenate hex string and convert to byte code
        self.bytecode = binascii.unhexlify(
            header + self.index + separator + self.positions
        )
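# --- Hedged illustration (not part of the GUI class above) ---
# The index/position records above are assembled as hex strings and unhexlified at
# the end, using Python 2 idioms (str.encode('hex')). Below is a rough Python 3
# equivalent of packing a single point entry, with hypothetical sample values;
# field meanings follow the comments in alignmap().
import struct
import binascii

counter, sample_no = 1, 42
label = ('Center  ' + str(sample_no).ljust(16)).encode('ascii')   # 16-char stage label
index = binascii.hexlify(label) + binascii.hexlify(
    struct.pack('<h', counter)          # entry index
    + struct.pack('<h', 0)              # entry type: point
    + struct.pack('<h', 1) * 2)         # num points to scan (width, height)
xstg, ystg, zstg = 12.5, 30.0, 18.0
pos = struct.pack('<f', xstg) + struct.pack('<f', ystg)
pos = binascii.hexlify(pos * 2                                     # x/y start, x/y end
                       + struct.pack('<f', zstg) * 2               # z start, z end
                       + struct.pack('x') * 4)                     # 4-byte padding
print(binascii.unhexlify(index + pos))   # raw bytes as they would appear in the .STG file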
if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    myapp = MyApp()
    myapp.show()
    sys.exit(app.exec_())
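# --- Hedged illustration (not part of the GUI module above) ---
# alignmap() maps platemap coordinates to stage coordinates by translating to a
# reference sample, applying x/y skew (scale) factors, rotating, and offsetting to
# the measured stage position of that reference sample. A compact standalone
# version of that transform, with hypothetical inputs:
import math

def map_to_stage(x, y, ref_map, ref_stage, rot, skx=1.0, sky=1.0):
    xn, yn = (x - ref_map[0]) * skx, (y - ref_map[1]) * sky
    xr = xn * math.cos(rot) - yn * math.sin(rot)
    yr = xn * math.sin(rot) + yn * math.cos(rot)
    return ref_stage[0] - xr, ref_stage[1] + yr   # stage x runs opposite to map x

if __name__ == '__main__':
    print(map_to_stage(10.0, 5.0, ref_map=(2.0, 1.0), ref_stage=(80.0, 40.0),
                       rot=math.radians(0.5), skx=1.01, sky=0.99))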
import logging
import time
from unittest import TestCase
from copy import deepcopy
from .. import SocketIO, LoggingNamespace, find_callback
HOST = 'localhost'
PORT = 9000
DATA = 'xxx'
PAYLOAD = {'xxx': 'yyy'}
BIN_DATA = bytearray(b'\xff\xff\xff')
BIN_PAYLOAD = {
    'data': BIN_DATA,
    'array': [bytearray(b'\xee'), bytearray(b'\xdd')]
}
logging.basicConfig(level=logging.DEBUG)
class BaseMixin(object):
    def setUp(self):
        super(BaseMixin, self).setUp()
        self.called_on_response = False
        self.wait_time_in_seconds = 1
    def tearDown(self):
        super(BaseMixin, self).tearDown()
        self.socketIO.disconnect()
    def test_disconnect(self):
        'Disconnect'
        namespace = self.socketIO.define(Namespace)
        self.assertTrue(self.socketIO.connected)
        self.assertFalse(namespace.called_on_disconnect)
        self.socketIO.disconnect()
        self.assertTrue(namespace.called_on_disconnect)
        self.assertFalse(self.socketIO.connected)
    def test_emit(self):
        'Emit'
        namespace = self.socketIO.define(Namespace)
        self.socketIO.emit('emit')
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(namespace.args_by_event, {
            'emit_response': (),
        })
    def test_emit_with_payload(self):
        'Emit with payload'
        namespace = self.socketIO.define(Namespace)
        self.socketIO.emit('emit_with_payload', deepcopy(PAYLOAD))
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(namespace.args_by_event, {
            'emit_with_payload_response': (PAYLOAD,),
        })
    def test_emit_with_multiple_payloads(self):
        'Emit with multiple payloads'
        namespace = self.socketIO.define(Namespace)
        self.socketIO.emit(
            'emit_with_multiple_payloads',
            deepcopy(PAYLOAD),
            deepcopy(PAYLOAD))
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(namespace.args_by_event, {
            'emit_with_multiple_payloads_response': (PAYLOAD, PAYLOAD),
        })
    def test_emit_with_binary_payload(self):
        'Emit with binary payload'
        namespace = self.socketIO.define(Namespace)
        self.socketIO.emit('emit_with_payload', deepcopy(BIN_PAYLOAD))
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(namespace.args_by_event, {
            'emit_with_payload_response': (BIN_PAYLOAD,),
        })
    def test_emit_with_callback(self):
        'Emit with callback'
        self.socketIO.emit('emit_with_callback', self.on_response)
        self.socketIO.wait_for_callbacks(seconds=self.wait_time_in_seconds)
        self.assertTrue(self.called_on_response)
    def test_emit_with_callback_with_payload(self):
        'Emit with callback with payload'
        self.socketIO.emit(
            'emit_with_callback_with_payload', self.on_response)
        self.socketIO.wait_for_callbacks(seconds=self.wait_time_in_seconds)
        self.assertTrue(self.called_on_response)
    def test_emit_with_callback_with_multiple_payloads(self):
        'Emit with callback with multiple payloads'
        self.socketIO.emit(
            'emit_with_callback_with_multiple_payloads', self.on_response)
        self.socketIO.wait_for_callbacks(seconds=self.wait_time_in_seconds)
        self.assertTrue(self.called_on_response)
    def test_emit_with_callback_with_binary_payload(self):
        'Emit with callback with binary payload'
        self.socketIO.emit(
            'emit_with_callback_with_binary_payload', self.on_binary_response)
        self.socketIO.wait_for_callbacks(seconds=self.wait_time_in_seconds)
        self.assertTrue(self.called_on_response)
    def test_emit_with_event(self):
        'Emit to trigger an event'
        self.socketIO.on('emit_with_event_response', self.on_response)
        self.socketIO.emit('emit_with_event', PAYLOAD)
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertTrue(self.called_on_response)
    def test_send(self):
        'Send'
        namespace = self.socketIO.define(Namespace)
        self.socketIO.send()
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(namespace.response, 'message_response')
    def test_send_with_data(self):
        'Send with data'
        namespace = self.socketIO.define(Namespace)
        self.socketIO.send(deepcopy(DATA))
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(namespace.response, DATA)
    def test_send_with_binary_data(self):
        'Send with binary data'
        namespace = self.socketIO.define(Namespace)
        self.socketIO.send(deepcopy(BIN_DATA))
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(namespace.response, BIN_DATA)
    def test_ack(self):
        'Respond to a server callback request'
        namespace = self.socketIO.define(Namespace)
        self.socketIO.emit(
            'trigger_server_expects_callback', deepcopy(PAYLOAD))
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(namespace.args_by_event, {
            'server_expects_callback': (PAYLOAD,),
            'server_received_callback': (PAYLOAD,),
        })
    def test_binary_ack(self):
        'Respond to a server callback request with binary data'
        namespace = self.socketIO.define(Namespace)
        self.socketIO.emit(
            'trigger_server_expects_callback', deepcopy(BIN_PAYLOAD))
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(namespace.args_by_event, {
            'server_expects_callback': (BIN_PAYLOAD,),
            'server_received_callback': (BIN_PAYLOAD,),
        })
    def test_wait_with_disconnect(self):
        'Exit loop when the client wants to disconnect'
        self.socketIO.define(Namespace)
        self.socketIO.disconnect()
        timeout_in_seconds = 5
        start_time = time.time()
        self.socketIO.wait(timeout_in_seconds)
        self.assertTrue(time.time() - start_time < timeout_in_seconds)
    def test_namespace_emit(self):
        'Behave differently in different namespaces'
        main_namespace = self.socketIO.define(Namespace)
        chat_namespace = self.socketIO.define(Namespace, '/chat')
        news_namespace = self.socketIO.define(Namespace, '/news')
        news_namespace.emit('emit_with_payload', deepcopy(PAYLOAD))
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(main_namespace.args_by_event, {})
        self.assertEqual(chat_namespace.args_by_event, {})
        self.assertEqual(news_namespace.args_by_event, {
            'emit_with_payload_response': (PAYLOAD,),
        })
    def test_namespace_emit_with_binary(self):
        'Make sure packet encoding works correctly'
        main_namespace = self.socketIO.define(Namespace)
        chat_namespace = self.socketIO.define(Namespace, '/chat')
        news_namespace = self.socketIO.define(Namespace, '/news')
        news_namespace.emit('emit_with_payload', deepcopy(BIN_PAYLOAD))
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(main_namespace.args_by_event, {})
        self.assertEqual(chat_namespace.args_by_event, {})
        self.assertEqual(news_namespace.args_by_event, {
            'emit_with_payload_response': (BIN_PAYLOAD,),
        })
    def test_namespace_ack(self):
        'Respond to a server callback request within a namespace'
        chat_namespace = self.socketIO.define(Namespace, '/chat')
        chat_namespace.emit(
            'trigger_server_expects_callback', deepcopy(PAYLOAD))
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(chat_namespace.args_by_event, {
            'server_expects_callback': (PAYLOAD,),
            'server_received_callback': (PAYLOAD,),
        })
    def test_namespace_ack_with_binary(self):
        'Respond to a server callback request within a namespace'
        chat_namespace = self.socketIO.define(Namespace, '/chat')
        chat_namespace.emit(
            'trigger_server_expects_callback', deepcopy(BIN_PAYLOAD))
        self.socketIO.wait(self.wait_time_in_seconds)
        self.assertEqual(chat_namespace.args_by_event, {
            'server_expects_callback': (BIN_PAYLOAD,),
            'server_received_callback': (BIN_PAYLOAD,),
        })
    def on_response(self, *args):
        for arg in args:
            if isinstance(arg, dict):
                self.assertEqual(arg, PAYLOAD)
            else:
                self.assertEqual(arg, DATA)
        self.called_on_response = True
    def on_binary_response(self, *args):
        for arg in args:
            if isinstance(arg, dict):
                self.assertEqual(arg, BIN_PAYLOAD)
            else:
                self.assertEqual(arg, BIN_DATA)
        self.called_on_response = True
class Test_XHR_PollingTransport(BaseMixin, TestCase):
    def setUp(self):
        super(Test_XHR_PollingTransport, self).setUp()
        self.socketIO = SocketIO(HOST, PORT, LoggingNamespace, transports=[
            'xhr-polling'], verify=False)
        self.assertEqual(self.socketIO.transport_name, 'xhr-polling')
class Test_WebsocketTransport(BaseMixin, TestCase):
    def setUp(self):
        super(Test_WebsocketTransport, self).setUp()
        self.socketIO = SocketIO(HOST, PORT, LoggingNamespace, transports=[
            'xhr-polling', 'websocket'], verify=False)
        self.assertEqual(self.socketIO.transport_name, 'websocket')
class Namespace(LoggingNamespace):
    def initialize(self):
        self.called_on_disconnect = False
        self.args_by_event = {}
        self.response = None
    def on_disconnect(self):
        self.called_on_disconnect = True
    def on_wait_with_disconnect_response(self):
        self.disconnect()
    def on_event(self, event, *args):
        callback, args = find_callback(args)
        if callback:
            callback(*args)
        self.args_by_event[event] = args
    def on_message(self, data):
        self.response = data
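# Usage sketch (not part of the test suite; assumes the same Socket.IO test
# server on HOST:PORT as the tests above and that socketIO_client is
# installed): mirrors the define/emit/wait pattern the tests exercise.
def _example_client_session(wait_time_in_seconds=1):
    client = SocketIO(HOST, PORT, LoggingNamespace, verify=False)
    namespace = client.define(Namespace)
    namespace.emit('emit_with_payload', {'xxx': 'yyy'})
    client.wait(wait_time_in_seconds)
    client.disconnect()
    return namespace.args_by_event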
 | |
| 
	"""
See README.md for a description of the logging API.
OFF state corresponds to having Logger.CURRENT == Logger.DEFAULT
ON state is otherwise
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import os
import sys
import shutil
import os.path as osp
import json
LOG_OUTPUT_FORMATS = ['stdout', 'log', 'json']
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class OutputFormat(object):
    def writekvs(self, kvs):
        """
        Write key-value pairs
        """
        raise NotImplementedError
    def writeseq(self, args):
        """
        Write a sequence of other data (e.g. a logging message)
        """
        pass
    def close(self):
        return
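# Illustrative sketch (hypothetical backend, not used elsewhere in this
# module): a new output format only has to implement writekvs(); writeseq()
# and close() are optional overrides.
class CSVOutputFormat(OutputFormat):
    def __init__(self, file):
        self.file = file
        self.wrote_header = False
    def writekvs(self, kvs):
        if not self.wrote_header:
            self.file.write(','.join(map(str, kvs.keys())) + '\n')
            self.wrote_header = True
        self.file.write(','.join(map(str, kvs.values())) + '\n')
        self.file.flush()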
class HumanOutputFormat(OutputFormat):
    def __init__(self, file):
        self.file = file
    def writekvs(self, kvs):
        # Create strings for printing
        key2str = OrderedDict()
        for (key, val) in kvs.items():
            valstr = '%-8.3g' % (val,) if hasattr(val, '__float__') else val
            key2str[self._truncate(key)] = self._truncate(valstr)
        # Find max widths
        keywidth = max(map(len, key2str.keys()))
        valwidth = max(map(len, key2str.values()))
        # Write out the data
        dashes = '-' * (keywidth + valwidth + 7)
        lines = [dashes]
        for (key, val) in key2str.items():
            lines.append('| %s%s | %s%s |' % (
                key,
                ' ' * (keywidth - len(key)),
                val,
                ' ' * (valwidth - len(val)),
            ))
        lines.append(dashes)
        self.file.write('\n'.join(lines) + '\n')
        # Flush the output to the file
        self.file.flush()
    def _truncate(self, s):
        return s[:20] + '...' if len(s) > 23 else s
    def writeseq(self, args):
        for arg in args:
            self.file.write(arg)
        self.file.write('\n')
        self.file.flush()
class JSONOutputFormat(OutputFormat):
    def __init__(self, file):
        self.file = file
    def writekvs(self, kvs):
        for k, v in kvs.items():
            if hasattr(v, 'dtype'):
                # Convert numpy scalars to native Python floats so that
                # json.dumps can serialize them.
                v = v.tolist()
                kvs[k] = float(v)
        self.file.write(json.dumps(kvs) + '\n')
        self.file.flush()
def make_output_format(format, ev_dir):
    os.makedirs(ev_dir, exist_ok=True)
    if format == 'stdout':
        return HumanOutputFormat(sys.stdout)
    elif format == 'log':
        log_file = open(osp.join(ev_dir, 'log.txt'), 'wt')
        return HumanOutputFormat(log_file)
    elif format == 'json':
        json_file = open(osp.join(ev_dir, 'progress.json'), 'wt')
        return JSONOutputFormat(json_file)
    else:
        raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
    """
    Log a value of some diagnostic
    Call this once for each diagnostic quantity, each iteration
    """
    Logger.CURRENT.logkv(key, val)
def dumpkvs():
    """
    Write all of the diagnostics from the current iteration to the
    configured output formats and clear them.
    """
    Logger.CURRENT.dumpkvs()
# for backwards compatibility
record_tabular = logkv
dump_tabular = dumpkvs
def log(*args, **kwargs):
    """
    Write the sequence of args, with no separators, to the console and output
    files (if you've configured an output file).
    """
    if "level" in kwargs:
        level = kwargs["level"]
    else:
        level = INFO
    Logger.CURRENT.log(*args, level=level)
def debug(*args):
    log(*args, level=DEBUG)
def info(*args):
    log(*args, level=INFO)
def warn(*args):
    log(*args, level=WARN)
def error(*args):
    log(*args, level=ERROR)
def set_level(level):
    """
    Set logging threshold on current logger.
    """
    Logger.CURRENT.set_level(level)
def get_dir():
    """
    Get the directory that log files are being written to.
    Will be None if there is no output directory (i.e., if no logging
    session has been started).
    """
    return Logger.CURRENT.get_dir()
def get_expt_dir():
    sys.stderr.write(
        "get_expt_dir() is Deprecated. Switch to get_dir() [%s]\n" %
        (get_dir(),))
    return get_dir()
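def _example_iteration_logging(n_iters=3):
    # Sketch (hypothetical helper, not part of the original API): the
    # intended per-iteration pattern is to accumulate diagnostics with
    # logkv() and flush them together with dumpkvs(), so they land in the
    # same row/line of every configured output format.
    for it in range(n_iters):
        logkv('iteration', it)
        logkv('score', it * 0.5)
        dumpkvs()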
# ================================================================
# Backend
# ================================================================
class Logger(object):
    # A logger with no output files. (See right below class definition)
    # So that you can still log to the terminal without setting up any output
    DEFAULT = None
    # Current logger being used by the free functions above
    CURRENT = None
    def __init__(self, dir, output_formats):
        self.name2val = OrderedDict()  # values this iteration
        self.level = INFO
        self.dir = dir
        self.output_formats = output_formats
    # Logging API, forwarded
    # ----------------------------------------
    def logkv(self, key, val):
        self.name2val[key] = val
    def dumpkvs(self):
        for fmt in self.output_formats:
            fmt.writekvs(self.name2val)
        self.name2val.clear()
    def log(self, *args, **kwargs):
        if "level" in kwargs:
            level = kwargs["level"]
        else:
            level = INFO
        if self.level <= level:
            self._do_log(args)
    # Configuration
    # ----------------------------------------
    def set_level(self, level):
        self.level = level
    def get_dir(self):
        return self.dir
    def close(self):
        for fmt in self.output_formats:
            fmt.close()
    # Misc
    # ----------------------------------------
    def _do_log(self, args):
        for fmt in self.output_formats:
            fmt.writeseq(args)
# ================================================================
Logger.DEFAULT = Logger(
    output_formats=[HumanOutputFormat(sys.stdout)], dir=None)
Logger.CURRENT = Logger.DEFAULT
class session(object):
    """
    Context manager that sets up the loggers for an experiment.
    """
    CURRENT = None  # Set to a LoggerContext object using enter/exit or cm
    def __init__(self, dir, format_strs=None):
        self.dir = dir
        if format_strs is None:
            format_strs = LOG_OUTPUT_FORMATS
        output_formats = [make_output_format(f, dir) for f in format_strs]
        Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
    def __enter__(self):
        os.makedirs(self.evaluation_dir(), exist_ok=True)
        output_formats = [
            make_output_format(
                f, self.evaluation_dir()) for f in LOG_OUTPUT_FORMATS]
        Logger.CURRENT = Logger(dir=self.dir, output_formats=output_formats)
    def __exit__(self, *args):
        Logger.CURRENT.close()
        Logger.CURRENT = Logger.DEFAULT
    def evaluation_dir(self):
        return self.dir
# ================================================================
def _demo():
    info("hi")
    debug("shouldn't appear")
    set_level(DEBUG)
    debug("should appear")
    dir = "/tmp/testlogging"
    if os.path.exists(dir):
        shutil.rmtree(dir)
    with session(dir=dir):
        record_tabular("a", 3)
        record_tabular("b", 2.5)
        dump_tabular()
        record_tabular("b", -2.5)
        record_tabular("a", 5.5)
        dump_tabular()
        info("^^^ should see a = 5.5")
    record_tabular("b", -2.5)
    dump_tabular()
    record_tabular("a", "longasslongasslongasslongasslongasslongassvalue")
    dump_tabular()
if __name__ == "__main__":
    _demo()
 | |
| 
	# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from botocore.vendored import requests
from botocore.vendored.requests.packages import urllib3
def _exception_from_packed_args(exception_cls, args=None, kwargs=None):
    # This is helpful for pickling (reducing) exceptions that only accept
    # kwargs, since __reduce__ can only supply positional arguments.
    # Ideally, this would be a class method on BotoCoreError, but instance
    # methods cannot be pickled.
    if args is None:
        args = ()
    if kwargs is None:
        kwargs = {}
    return exception_cls(*args, **kwargs)
class BotoCoreError(Exception):
    """
    The base exception class for BotoCore exceptions.
    :ivar msg: The descriptive message associated with the error.
    """
    fmt = 'An unspecified error occurred'
    def __init__(self, **kwargs):
        msg = self.fmt.format(**kwargs)
        Exception.__init__(self, msg)
        self.kwargs = kwargs
    def __reduce__(self):
        return _exception_from_packed_args, (self.__class__, None, self.kwargs)
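class _ExampleWidgetError(BotoCoreError):
    # Sketch (hypothetical exception, not part of botocore): with the
    # fmt/kwargs pattern above, a new exception type normally only needs a
    # class-level format string; __init__ and pickling come for free, e.g.
    # raise _ExampleWidgetError(widget_id='w-123', reason='timeout').
    fmt = 'Unable to retrieve widget {widget_id}: {reason}'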
class DataNotFoundError(BotoCoreError):
    """
    The data associated with a particular path could not be loaded.
    :ivar path: The data path that the user attempted to load.
    """
    fmt = 'Unable to load data for: {data_path}'
class UnknownServiceError(DataNotFoundError):
    """Raised when trying to load data for an unknown service.
    :ivar service_name: The name of the unknown service.
    """
    fmt = (
        "Unknown service: '{service_name}'. Valid service names are: "
        "{known_service_names}")
class ApiVersionNotFoundError(BotoCoreError):
    """
    The data associated with either that API version or a compatible one
    could not be loaded.
    :ivar data_path: The data path that the user attempted to load.
    :ivar api_version: The API version that the user attempted to load.
    """
    fmt = 'Unable to load data {data_path} for: {api_version}'
class HTTPClientError(BotoCoreError):
    fmt = 'An HTTP Client raised an unhandled exception: {error}'
    def __init__(self, request=None, response=None, **kwargs):
        self.request = request
        self.response = response
        super(HTTPClientError, self).__init__(**kwargs)
    def __reduce__(self):
        return _exception_from_packed_args, (
            self.__class__, (self.request, self.response), self.kwargs)
class ConnectionError(BotoCoreError):
    fmt = 'An HTTP Client failed to establish a connection: {error}'
class EndpointConnectionError(ConnectionError):
    fmt = 'Could not connect to the endpoint URL: "{endpoint_url}"'
class SSLError(ConnectionError, requests.exceptions.SSLError):
    fmt = 'SSL validation failed for {endpoint_url} {error}'
class ConnectionClosedError(HTTPClientError):
    fmt = (
        'Connection was closed before we received a valid response '
        'from endpoint URL: "{endpoint_url}".')
class ReadTimeoutError(HTTPClientError, requests.exceptions.ReadTimeout,
                       urllib3.exceptions.ReadTimeoutError):
    fmt = 'Read timeout on endpoint URL: "{endpoint_url}"'
class ConnectTimeoutError(ConnectionError, requests.exceptions.ConnectTimeout):
    fmt = 'Connect timeout on endpoint URL: "{endpoint_url}"'
class ProxyConnectionError(ConnectionError, requests.exceptions.ProxyError):
    fmt = 'Failed to connect to proxy URL: "{proxy_url}"'
class NoCredentialsError(BotoCoreError):
    """
    No credentials could be found
    """
    fmt = 'Unable to locate credentials'
class PartialCredentialsError(BotoCoreError):
    """
    Only partial credentials were found.
    :ivar cred_var: The missing credential variable name.
    """
    fmt = 'Partial credentials found in {provider}, missing: {cred_var}'
class CredentialRetrievalError(BotoCoreError):
    """
    Error attempting to retrieve credentials from a remote source.
    :ivar provider: The name of the credential provider.
    :ivar error_msg: The message explaining why credentials could not be
        retrieved.
    """
    fmt = 'Error when retrieving credentials from {provider}: {error_msg}'
class UnknownSignatureVersionError(BotoCoreError):
    """
    Requested Signature Version is not known.
    :ivar signature_version: The name of the requested signature version.
    """
    fmt = 'Unknown Signature Version: {signature_version}.'
class ServiceNotInRegionError(BotoCoreError):
    """
    The service is not available in requested region.
    :ivar service_name: The name of the service.
    :ivar region_name: The name of the region.
    """
    fmt = 'Service {service_name} not available in region {region_name}'
class BaseEndpointResolverError(BotoCoreError):
    """Base error for endpoint resolving errors.
    Should never be raised directly, but clients can catch
    this exception if they want to generically handle any errors
    during the endpoint resolution process.
    """
class NoRegionError(BaseEndpointResolverError):
    """No region was specified."""
    fmt = 'You must specify a region.'
class UnknownEndpointError(BaseEndpointResolverError, ValueError):
    """
    Could not construct an endpoint.
    :ivar service_name: The name of the service.
    :ivar region_name: The name of the region.
    """
    fmt = (
        'Unable to construct an endpoint for '
        '{service_name} in region {region_name}')
class ProfileNotFound(BotoCoreError):
    """
    The specified configuration profile was not found in the
    configuration file.
    :ivar profile: The name of the profile the user attempted to load.
    """
    fmt = 'The config profile ({profile}) could not be found'
class ConfigParseError(BotoCoreError):
    """
    The configuration file could not be parsed.
    :ivar path: The path to the configuration file.
    """
    fmt = 'Unable to parse config file: {path}'
class ConfigNotFound(BotoCoreError):
    """
    The specified configuration file could not be found.
    :ivar path: The path to the configuration file.
    """
    fmt = 'The specified config file ({path}) could not be found.'
class MissingParametersError(BotoCoreError):
    """
    One or more required parameters were not supplied.
    :ivar object: The object that has missing parameters.
        This can be an operation or a parameter (in the
        case of inner params).  The str() of this object
        will be used so it doesn't need to implement anything
        other than str().
    :ivar missing: The names of the missing parameters.
    """
    fmt = ('The following required parameters are missing for '
           '{object_name}: {missing}')
class ValidationError(BotoCoreError):
    """
    An exception occurred validating parameters.
    Subclasses must accept a ``value`` and ``param``
    argument in their ``__init__``.
    :ivar value: The value that was being validated.
    :ivar param: The parameter that failed validation.
    :ivar type_name: The name of the underlying type.
    """
    fmt = ("Invalid value ('{value}') for param {param} "
           "of type {type_name} ")
class ParamValidationError(BotoCoreError):
    fmt = 'Parameter validation failed:\n{report}'
# These exceptions subclass from ValidationError so that code
# can just 'except ValidationError' to catch any possible validation
# error.
class UnknownKeyError(ValidationError):
    """
    Unknown key in a struct parameter.
    :ivar value: The value that was being checked.
    :ivar param: The name of the parameter.
    :ivar choices: The valid choices the value can be.
    """
    fmt = ("Unknown key '{value}' for param '{param}'.  Must be one "
           "of: {choices}")
class RangeError(ValidationError):
    """
    A parameter value was out of the valid range.
    :ivar value: The value that was being checked.
    :ivar param: The parameter that failed validation.
    :ivar min_value: The specified minimum value.
    :ivar max_value: The specified maximum value.
    """
    fmt = ('Value out of range for param {param}: '
           '{min_value} <= {value} <= {max_value}')
class UnknownParameterError(ValidationError):
    """
    Unknown top level parameter.
    :ivar name: The name of the unknown parameter.
    :ivar operation: The name of the operation.
    :ivar choices: The valid choices the parameter name can be.
    """
    fmt = (
        "Unknown parameter '{name}' for operation {operation}.  Must be one "
        "of: {choices}"
    )
class AliasConflictParameterError(ValidationError):
    """
    Error when an alias is provided for a parameter as well as the original.
    :ivar original: The name of the original parameter.
    :ivar alias: The name of the alias
    :ivar operation: The name of the operation.
    """
    fmt = (
        "Parameter '{original}' and its alias '{alias}' were provided "
        "for operation {operation}.  Only one of them may be used."
    )
class UnknownServiceStyle(BotoCoreError):
    """
    Unknown style of service invocation.
    :ivar service_style: The style requested.
    """
    fmt = 'The service style ({service_style}) is not understood.'
class PaginationError(BotoCoreError):
    fmt = 'Error during pagination: {message}'
class OperationNotPageableError(BotoCoreError):
    fmt = 'Operation cannot be paginated: {operation_name}'
class ChecksumError(BotoCoreError):
    """The expected checksum did not match the calculated checksum.
    """
    fmt = ('Checksum {checksum_type} failed, expected checksum '
           '{expected_checksum} did not match calculated checksum '
           '{actual_checksum}.')
class UnseekableStreamError(BotoCoreError):
    """Need to seek a stream, but stream does not support seeking.
    """
    fmt = ('Need to rewind the stream {stream_object}, but stream '
           'is not seekable.')
class WaiterError(BotoCoreError):
    """Waiter failed to reach desired state."""
    fmt = 'Waiter {name} failed: {reason}'
    def __init__(self, name, reason, last_response):
        super(WaiterError, self).__init__(name=name, reason=reason)
        self.last_response = last_response
class IncompleteReadError(BotoCoreError):
    """HTTP response did not return expected number of bytes."""
    fmt = ('{actual_bytes} read, but total bytes '
           'expected is {expected_bytes}.')
class InvalidExpressionError(BotoCoreError):
    """Expression is either invalid or too complex."""
    fmt = 'Invalid expression {expression}: Only dotted lookups are supported.'
class UnknownCredentialError(BotoCoreError):
    """Tried to insert before/after an unregistered credential type."""
    fmt = 'Credential named {name} not found.'
class WaiterConfigError(BotoCoreError):
    """Error when processing waiter configuration."""
    fmt = 'Error processing waiter config: {error_msg}'
class UnknownClientMethodError(BotoCoreError):
    """Error when trying to access a method on a client that does not exist."""
    fmt = 'Client does not have method: {method_name}'
class UnsupportedSignatureVersionError(BotoCoreError):
    """Error when trying to access a method on a client that does not exist."""
    fmt = 'Signature version is not supported: {signature_version}'
class ClientError(Exception):
    MSG_TEMPLATE = (
        'An error occurred ({error_code}) when calling the {operation_name} '
        'operation{retry_info}: {error_message}')
    def __init__(self, error_response, operation_name):
        retry_info = self._get_retry_info(error_response)
        error = error_response.get('Error', {})
        msg = self.MSG_TEMPLATE.format(
            error_code=error.get('Code', 'Unknown'),
            error_message=error.get('Message', 'Unknown'),
            operation_name=operation_name,
            retry_info=retry_info,
        )
        super(ClientError, self).__init__(msg)
        self.response = error_response
        self.operation_name = operation_name
    def _get_retry_info(self, response):
        retry_info = ''
        if 'ResponseMetadata' in response:
            metadata = response['ResponseMetadata']
            if metadata.get('MaxAttemptsReached', False):
                if 'RetryAttempts' in metadata:
                    retry_info = (' (reached max retries: %s)' %
                                  metadata['RetryAttempts'])
        return retry_info
    def __reduce__(self):
        # Subclasses of ClientError's are dynamically generated and
        # cannot be pickled unless they are attributes of a
        # module. So at the very least return a ClientError back.
        return ClientError, (self.response, self.operation_name)
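def _example_client_error_handling():
    # Usage sketch (hypothetical values, not part of botocore): shows how
    # MSG_TEMPLATE and the stored attributes surface to callers that catch
    # ClientError around a service call.
    error_response = {
        'Error': {'Code': 'AccessDenied', 'Message': 'Not authorized'},
        'ResponseMetadata': {'MaxAttemptsReached': True, 'RetryAttempts': 4},
    }
    try:
        raise ClientError(error_response, 'GetObject')
    except ClientError as e:
        return e.response['Error']['Code'], e.operation_name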
class EventStreamError(ClientError):
    pass
class UnsupportedTLSVersionWarning(Warning):
    """Warn when an openssl version that uses TLS 1.2 is required"""
    pass
class ImminentRemovalWarning(Warning):
    pass
class InvalidDNSNameError(BotoCoreError):
    """Error when virtual host path is forced on a non-DNS compatible bucket"""
    fmt = (
        'Bucket named {bucket_name} is not DNS compatible. Virtual '
        'hosted-style addressing cannot be used. The addressing style '
        'can be configured by removing the addressing_style value '
        'or setting that value to \'path\' or \'auto\' in the AWS Config '
        'file or in the botocore.client.Config object.'
    )
class InvalidS3AddressingStyleError(BotoCoreError):
    """Error when an invalid path style is specified"""
    fmt = (
        'S3 addressing style {s3_addressing_style} is invalid. Valid options '
        'are: \'auto\', \'virtual\', and \'path\''
    )
class UnsupportedS3ArnError(BotoCoreError):
    """Error when S3 arn provided to Bucket parameter is not supported"""
    fmt = (
        'S3 ARN {arn} provided to "Bucket" parameter is invalid. Only '
        'ARNs for S3 access-points are supported.'
    )
class UnsupportedS3AccesspointConfigurationError(BotoCoreError):
    """Error when an unsupported configuration is used with access-points"""
    fmt = (
        'Unsupported configuration when using S3 access-points: {msg}'
    )
class InvalidRetryConfigurationError(BotoCoreError):
    """Error when invalid retry configuration is specified"""
    fmt = (
        'Cannot provide retry configuration for "{retry_config_option}". '
        'Valid retry configuration options are: \'max_attempts\''
    )
class InvalidMaxRetryAttemptsError(InvalidRetryConfigurationError):
    """Error when invalid retry configuration is specified"""
    fmt = (
        'Value provided to "max_attempts": {provided_max_attempts} must '
        'be an integer greater than or equal to {min_value}.'
    )
class InvalidRetryModeError(InvalidRetryConfigurationError):
    """Error when invalid retry mode configuration is specified"""
    fmt = (
        'Invalid value provided to "mode": "{provided_retry_mode}" must '
        'be one of: "legacy", "standard", "adaptive"'
    )
class InvalidS3UsEast1RegionalEndpointConfigError(BotoCoreError):
    """Error for invalid s3 us-east-1 regional endpoints configuration"""
    fmt = (
        'S3 us-east-1 regional endpoint option '
        '{s3_us_east_1_regional_endpoint_config} is '
        'invalid. Valid options are: legacy and regional'
    )
class InvalidSTSRegionalEndpointsConfigError(BotoCoreError):
    """Error when invalid sts regional endpoints configuration is specified"""
    fmt = (
        'STS regional endpoints option {sts_regional_endpoints_config} is '
        'invalid. Valid options are: legacy and regional'
    )
class StubResponseError(BotoCoreError):
    fmt = 'Error getting response stub for operation {operation_name}: {reason}'
class StubAssertionError(StubResponseError, AssertionError):
    pass
class UnStubbedResponseError(StubResponseError):
    pass
class InvalidConfigError(BotoCoreError):
    fmt = '{error_msg}'
class InfiniteLoopConfigError(InvalidConfigError):
    fmt = (
        'Infinite loop in credential configuration detected. Attempting to '
        'load from profile {source_profile} which has already been visited. '
        'Visited profiles: {visited_profiles}'
    )
class RefreshWithMFAUnsupportedError(BotoCoreError):
    fmt = 'Cannot refresh credentials: MFA token required.'
class MD5UnavailableError(BotoCoreError):
    fmt = "This system does not support MD5 generation."
class MetadataRetrievalError(BotoCoreError):
    fmt = "Error retrieving metadata: {error_msg}"
class UndefinedModelAttributeError(Exception):
    pass
class MissingServiceIdError(UndefinedModelAttributeError):
    fmt = (
        "The model being used for the service {service_name} is missing the "
        "serviceId metadata property, which is required."
    )
    def __init__(self, **kwargs):
        msg = self.fmt.format(**kwargs)
        Exception.__init__(self, msg)
        self.kwargs = kwargs
class CapacityNotAvailableError(BotoCoreError):
    fmt = (
        'Insufficient request capacity available.'
    )
 | |
| 
	"""
Filtering functions
"""
import logging
import os
import numpy as np
from scipy.signal import butter, filtfilt
def _butterworth(ts, low_frequency, high_factor, order, sampling_frequency):
    """Butterworth filter
    Parameters
    ----------
    ts: np.array
        T  numpy array, where T is the number of time samples
    low_frequency: int
        Low pass frequency (Hz)
    high_factor: float
        High pass factor (proportion of sampling rate)
    order: int
        Order of Butterworth filter
    sampling_frequency: int
        Sampling frequency (Hz)
    Notes
    -----
    Filtering is zero-phase (scipy.signal.filtfilt). Only the high-pass
    stage (cutoff at low_frequency) is applied. If a two dimensional
    (T x C) array is passed, each channel (column) is filtered
    independently.
    """
    low = float(low_frequency) / sampling_frequency * 2
    high = float(high_factor) * 2
    b, a = butter(order, low, btype='high', analog=False)
    if ts.ndim == 1:
        return filtfilt(b, a, ts)
    else:
        T, C = ts.shape
        output = np.zeros((T, C), 'float32')
        for c in range(C):
            output[:, c] = filtfilt(b, a, ts[:, c])
        return output
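def _example_butterworth():
    # Sketch (synthetic data, hypothetical parameters): high-pass filter a
    # two-channel recording with a 300 Hz cutoff at a 30 kHz sampling rate;
    # each channel (column) is filtered independently.
    ts = np.random.randn(30000, 2).astype('float32')
    return _butterworth(ts, low_frequency=300, high_factor=0.1,
                        order=3, sampling_frequency=30000)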
def _mean_standard_deviation(rec, centered=False):
    """Determine the robust (MAD-based) noise standard deviation and the
    center of each channel
    Parameters
    ----------
    rec : matrix [length of recording, number of channels]
    centered : bool
        If False, the recording is mean-centered before the standard
        deviation is estimated
    Returns
    -------
    sd : vector [number of channels]
        standard deviation in each channel
    centers : vector [number of channels]
        mean of each channel (zeros if centered=True)
    """
    # find standard deviation using robust method
    if not centered:
        centers = np.mean(rec, axis=0)
        rec = rec - centers[None]
    else:
        centers = np.zeros(rec.shape[1], 'float32')
    return np.median(np.abs(rec), 0)/0.6745, centers
def _standardize(rec, sd=None, centers=None):
    """Standardize recording: subtract each channel's center and divide by
    its noise standard deviation
    Parameters
    ----------
    rec : matrix [length of recording, number of channels]
        recording
    sd : vector [number of channels,]
        standard deviation of each channel (estimated if None)
    centers : vector [number of channels,]
        center of each channel (estimated if None)
    Returns
    -------
    matrix [length of recording, number of channels]
        standardized recording
    """
    # find standard deviation using robust method
    if (sd is None) or (centers is None):
        sd, centers = _mean_standard_deviation(rec, centered=False)
    # standardize all channels with SD> 0.1 (Voltage?) units
    # Cat: TODO: ensure that this is actually correct for all types of channels
    idx1 = np.where(sd>=0.1)[0]
    rec[:,idx1] = np.divide(rec[:,idx1] - centers[idx1][None], sd[idx1])
    
    # zero out bad channels
    idx2 = np.where(sd<0.1)[0]
    rec[:,idx2]=0.
    
    return rec
    #return np.divide(rec, sd)
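def _example_standardize():
    # Sketch (synthetic data): estimate robust per-channel noise levels and
    # centers with _mean_standard_deviation, then standardize the recording.
    rec = np.random.randn(10000, 4).astype('float32')
    sd, centers = _mean_standard_deviation(rec)
    return _standardize(rec, sd, centers)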
def filter_standardize_batch(batch_id, reader, fname_mean_sd,
                             apply_filter, out_dtype, output_directory,
                             low_frequency=None, high_factor=None,
                             order=None, sampling_frequency=None):
    """Butterworth filter for a one dimensional time series
    Parameters
    ----------
    ts: np.array
        T  numpy array, where T is the number of time samples
    low_frequency: int
        Low pass frequency (Hz)
    high_factor: float
        High pass factor (proportion of sampling rate)
    order: int
        Order of Butterworth filter
    sampling_frequency: int
        Sampling frequency (Hz)
    Notes
    -----
    This function can only be applied to a one dimensional array, to apply
    it to multiple channels use butterworth
    Raises
    ------
    NotImplementedError
        If a multidmensional array is passed
    """
    logger = logging.getLogger(__name__)
    
    # filter
    if apply_filter:
        # read a batch
        ts = reader.read_data_batch(batch_id, add_buffer=True)
        ts = _butterworth(ts, low_frequency, high_factor,
                              order, sampling_frequency)
        ts = ts[reader.buffer:-reader.buffer]
    else:
        ts = reader.read_data_batch(batch_id, add_buffer=False)
    # standardize
    temp = np.load(fname_mean_sd)
    sd = temp['sd']
    centers = temp['centers']
    ts = _standardize(ts, sd, centers)
    
    # save
    fname = os.path.join(
        output_directory,
        "standardized_{}.npy".format(
            str(batch_id).zfill(6)))
    np.save(fname, ts.astype(out_dtype))
    #fname = os.path.join(
    #    output_directory,
    #    "standardized_{}.bin".format(
    #        str(batch_id).zfill(6)))
    #f = open(fname, 'wb')
    #f.write(ts.astype(out_dtype))
def get_std(ts,
            sampling_frequency,
            fname,
            apply_filter=False, 
            low_frequency=None,
            high_factor=None,
            order=None):
    """Butterworth filter for a one dimensional time series
    Parameters
    ----------
    ts: np.array
        T  numpy array, where T is the number of time samples
    low_frequency: int
        Low pass frequency (Hz)
    high_factor: float
        High pass factor (proportion of sampling rate)
    order: int
        Order of Butterworth filter
    sampling_frequency: int
        Sampling frequency (Hz)
    Notes
    -----
    This function can only be applied to a one dimensional array, to apply
    it to multiple channels use butterworth
    Raises
    ------
    NotImplementedError
        If a multidmensional array is passed
    """
    # filter
    if apply_filter:
        ts = _butterworth(ts, low_frequency, high_factor,
                          order, sampling_frequency)
    # standardize
    sd, centers = _mean_standard_deviation(ts)
    
    # save
    np.savez(fname,
             centers=centers,
             sd=sd)
def merge_filtered_files(filtered_location, output_directory):
    logger = logging.getLogger(__name__)
    filenames = os.listdir(filtered_location)
    filenames_sorted = sorted(filenames)
    f_out = os.path.join(output_directory, "standardized.bin")
    logger.info('...saving standardized file: %s', f_out)
    f = open(f_out, 'wb')
    for fname in filenames_sorted:
        res = np.load(os.path.join(filtered_location, fname))
        res.tofile(f)
        os.remove(os.path.join(filtered_location, fname))
    f.close()
 | |
| 
	# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances.  You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic:  What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager:  The module name of a class derived from
                  :class:`manager.Manager` (default:
                  :class:`cinder.volume.manager.Manager`).
:volume_driver:  Used by :class:`Manager`.  Defaults to
                 :class:`cinder.volume.drivers.lvm.LVMISCSIDriver`.
:volume_group:  Name of the group that will contain exported volumes (default:
                `cinder-volumes`)
:num_shell_tries:  Number of times to attempt to run commands (default: 3)
"""
import time
from oslo.config import cfg
from oslo import messaging
from cinder import compute
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.image import glance
from cinder import manager
from cinder.openstack.common import excutils
from cinder.openstack.common import importutils
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import periodic_task
from cinder.openstack.common import timeutils
from cinder.openstack.common import uuidutils
from cinder import quota
from cinder import utils
from cinder.volume.configuration import Configuration
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
from cinder.zonemanager.fc_zone_manager import ZoneManager
from eventlet.greenpool import GreenPool
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
volume_manager_opts = [
    cfg.StrOpt('volume_driver',
               default='cinder.volume.drivers.lvm.LVMISCSIDriver',
               help='Driver to use for volume creation'),
    cfg.IntOpt('migration_create_volume_timeout_secs',
               default=300,
               help='Timeout for creating the volume to migrate to '
                    'when performing volume migration (seconds)'),
    cfg.BoolOpt('volume_service_inithost_offload',
                default=False,
                help='Offload pending volume delete during '
                     'volume service startup'),
    cfg.StrOpt('zoning_mode',
               default='none',
               help='FC Zoning mode configured'),
    cfg.StrOpt('extra_capabilities',
               default='{}',
               help='User defined capabilities, a JSON formatted string '
                    'specifying key/value pairs.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
    'cinder.volume.drivers.storwize_svc.StorwizeSVCDriver':
    'cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver',
    'cinder.volume.drivers.xiv_ds8k.XIVDS8KDriver':
    'cinder.volume.drivers.ibm.xiv_ds8k.XIVDS8KDriver',
    'cinder.volume.drivers.san.hp_lefthand.HpSanISCSIDriver':
    'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver',
    'cinder.volume.drivers.gpfs.GPFSDriver':
    'cinder.volume.drivers.ibm.gpfs.GPFSDriver', }
def locked_volume_operation(f):
    """Lock decorator for volume operations.
    Takes a named lock prior to executing the operation. The lock is named with
    the operation executed and the id of the volume. This lock can then be used
    by other operations to avoid operation conflicts on shared volumes.
    Example use:
    If a volume operation uses this decorator, it will block until the named
    lock is free. This is used to protect concurrent operations on the same
    volume e.g. delete VolA while create volume VolB from VolA is in progress.
    """
    def lvo_inner1(inst, context, volume_id, **kwargs):
        @utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
        def lvo_inner2(*_args, **_kwargs):
            return f(*_args, **_kwargs)
        return lvo_inner2(inst, context, volume_id, **kwargs)
    return lvo_inner1
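# Illustration (hypothetical ids): with f.__name__ == 'delete_volume' and
# volume_id == 'vol-1', lvo_inner2 above takes the external lock named
# 'vol-1-delete_volume'. create_volume below reserves the same name via
# locked_action when cloning from a source volume, so the two operations
# serialize instead of conflicting.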
def locked_snapshot_operation(f):
    """Lock decorator for snapshot operations.
    Takes a named lock prior to executing the operation. The lock is named with
    the operation executed and the id of the snapshot. This lock can then be
    used by other operations to avoid operation conflicts on shared snapshots.
    Example use:
    If a snapshot operation uses this decorator, it will block until the named
    lock is free. This is used to protect concurrent operations on the same
    snapshot e.g. delete SnapA while create volume VolA from SnapA is in
    progress.
    """
    def lso_inner1(inst, context, snapshot_id, **kwargs):
        @utils.synchronized("%s-%s" % (snapshot_id, f.__name__), external=True)
        def lso_inner2(*_args, **_kwargs):
            return f(*_args, **_kwargs)
        return lso_inner2(inst, context, snapshot_id, **kwargs)
    return lso_inner1
class VolumeManager(manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""
    RPC_API_VERSION = '1.16'
    target = messaging.Target(version=RPC_API_VERSION)
    def __init__(self, volume_driver=None, service_name=None,
                 *args, **kwargs):
        """Load the driver from the one specified in args, or from flags."""
        # update_service_capabilities needs service_name to be volume
        super(VolumeManager, self).__init__(service_name='volume',
                                            *args, **kwargs)
        self.configuration = Configuration(volume_manager_opts,
                                           config_group=service_name)
        self._tp = GreenPool()
        self.stats = {}
        if not volume_driver:
            # Get from configuration, which will get the default
            # if its not using the multi backend
            volume_driver = self.configuration.volume_driver
        if volume_driver in MAPPING:
            LOG.warn(_("Driver path %s is deprecated, update your "
                       "configuration to the new path."), volume_driver)
            volume_driver = MAPPING[volume_driver]
        self.driver = importutils.import_object(
            volume_driver,
            configuration=self.configuration,
            db=self.db,
            host=self.host)
        self.zonemanager = None
        try:
            self.extra_capabilities = jsonutils.loads(
                self.driver.configuration.extra_capabilities)
        except AttributeError:
            self.extra_capabilities = {}
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Invalid JSON: %s" %
                          self.driver.configuration.extra_capabilities)
    def _add_to_threadpool(self, func, *args, **kwargs):
        self._tp.spawn_n(func, *args, **kwargs)
    def init_host(self):
        """Do any initialization that needs to be run if this is a
           standalone service.
        """
        ctxt = context.get_admin_context()
        if self.configuration.safe_get('zoning_mode') == 'fabric':
            self.zonemanager = ZoneManager(configuration=self.configuration)
            LOG.info(_("Starting FC Zone Manager %(zm_version)s,"
                       " Driver %(drv_name)s %(drv_version)s") %
                     {'zm_version': self.zonemanager.get_version(),
                      'drv_name': self.zonemanager.driver.__class__.__name__,
                      'drv_version': self.zonemanager.driver.get_version()})
        LOG.info(_("Starting volume driver %(driver_name)s (%(version)s)") %
                 {'driver_name': self.driver.__class__.__name__,
                  'version': self.driver.get_version()})
        try:
            self.driver.do_setup(ctxt)
            self.driver.check_for_setup_error()
        except Exception as ex:
            LOG.error(_("Error encountered during "
                        "initialization of driver: %(name)s") %
                      {'name': self.driver.__class__.__name__})
            LOG.exception(ex)
            # we don't want to continue since we failed
            # to initialize the driver correctly.
            return
        volumes = self.db.volume_get_all_by_host(ctxt, self.host)
        LOG.debug("Re-exporting %s volumes", len(volumes))
        try:
            sum = 0
            self.stats.update({'allocated_capacity_gb': sum})
            for volume in volumes:
                if volume['status'] in ['in-use']:
                    # calculate allocated capacity for driver
                    sum += volume['size']
                    self.stats['allocated_capacity_gb'] = sum
                    try:
                        self.driver.ensure_export(ctxt, volume)
                    except Exception as export_ex:
                        LOG.error(_("Failed to re-export volume %s: "
                                    "setting to error state"), volume['id'])
                        LOG.exception(export_ex)
                        self.db.volume_update(ctxt,
                                              volume['id'],
                                              {'status': 'error'})
                elif volume['status'] == 'downloading':
                    LOG.info(_("volume %s stuck in a downloading state"),
                             volume['id'])
                    self.driver.clear_download(ctxt, volume)
                    self.db.volume_update(ctxt,
                                          volume['id'],
                                          {'status': 'error'})
                else:
                    LOG.info(_("volume %s: skipping export"), volume['id'])
        except Exception as ex:
            LOG.error(_("Error encountered during "
                        "re-exporting phase of driver initialization: "
                        " %(name)s") %
                      {'name': self.driver.__class__.__name__})
            LOG.exception(ex)
            return
        # at this point the driver is considered initialized.
        self.driver.set_initialized()
        LOG.debug('Resuming any in progress delete operations')
        for volume in volumes:
            if volume['status'] == 'deleting':
                LOG.info(_('Resuming delete on volume: %s') % volume['id'])
                if CONF.volume_service_inithost_offload:
                    # Offload all the pending volume delete operations to the
                    # threadpool to prevent the main volume service thread
                    # from being blocked.
                    self._add_to_threadpool(self.delete_volume,
                                            ctxt, volume['id'])
                else:
                    # By default, delete volumes sequentially
                    self.delete_volume(ctxt, volume['id'])
        # collect and publish service capabilities
        self.publish_service_capabilities(ctxt)
    def create_volume(self, context, volume_id, request_spec=None,
                      filter_properties=None, allow_reschedule=True,
                      snapshot_id=None, image_id=None, source_volid=None):
        """Creates the volume."""
        context_saved = context.deepcopy()
        context = context.elevated()
        if filter_properties is None:
            filter_properties = {}
        try:
            # NOTE(flaper87): Driver initialization is
            # verified by the task itself.
            flow_engine = create_volume.get_flow(
                context,
                self.db,
                self.driver,
                self.scheduler_rpcapi,
                self.host,
                volume_id,
                snapshot_id=snapshot_id,
                image_id=image_id,
                source_volid=source_volid,
                allow_reschedule=allow_reschedule,
                reschedule_context=context_saved,
                request_spec=request_spec,
                filter_properties=filter_properties)
        except Exception:
            LOG.exception(_("Failed to create manager volume flow"))
            raise exception.CinderException(
                _("Failed to create manager volume flow"))
        if snapshot_id is not None:
            # Make sure the snapshot is not deleted until we are done with it.
            locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
        elif source_volid is not None:
            # Make sure the volume is not deleted until we are done with it.
            locked_action = "%s-%s" % (source_volid, 'delete_volume')
        else:
            locked_action = None
        def _run_flow():
            # This code executes the create volume flow. If something goes
            # wrong, the flow reverts all the work that was done and
            # re-raises the exception. Otherwise, all data generated by the
            # flow becomes available in the flow engine's storage.
            with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
                flow_engine.run()
        @utils.synchronized(locked_action, external=True)
        def _run_flow_locked():
            _run_flow()
        if locked_action is None:
            _run_flow()
        else:
            _run_flow_locked()
        # Fetch created volume from storage
        volume_ref = flow_engine.storage.fetch('volume')
        # Update volume stats
        self.stats['allocated_capacity_gb'] += volume_ref['size']
        return volume_ref['id']
    @locked_volume_operation
    def delete_volume(self, context, volume_id, unmanage_only=False):
        """Deletes and unexports volume."""
        context = context.elevated()
        try:
            volume_ref = self.db.volume_get(context, volume_id)
        except exception.VolumeNotFound:
            # NOTE(thingee): It could be possible for a volume to
            # be deleted when resuming deletes from init_host().
            LOG.info(_("Tried to delete volume %s, but it no longer exists, "
                       "moving on") % (volume_id))
            return True
        if context.project_id != volume_ref['project_id']:
            project_id = volume_ref['project_id']
        else:
            project_id = context.project_id
        LOG.info(_("volume %s: deleting"), volume_ref['id'])
        if volume_ref['attach_status'] == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume_id)
        if volume_ref['host'] != self.host:
            raise exception.InvalidVolume(
                reason=_("volume is not local to this node"))
        self._notify_about_volume_usage(context, volume_ref, "delete.start")
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
            LOG.debug("volume %s: removing export", volume_ref['id'])
            self.driver.remove_export(context, volume_ref)
            LOG.debug("volume %s: deleting", volume_ref['id'])
            if unmanage_only:
                self.driver.unmanage(volume_ref)
            else:
                self.driver.delete_volume(volume_ref)
        except exception.VolumeIsBusy:
            LOG.error(_("Cannot delete volume %s: volume is busy"),
                      volume_ref['id'])
            self.db.volume_update(context, volume_ref['id'],
                                  {'status': 'available'})
            return True
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_ref['id'],
                                      {'status': 'error_deleting'})
        # If deleting the source volume in a migration, we want to skip quotas
        # and other database updates.
        if volume_ref['migration_status']:
            return True
        # Get reservations
        try:
            reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.get('volume_type_id'))
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_("Failed to update usages deleting volume"))
        # Delete glance metadata if it exists
        self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
        self.db.volume_destroy(context, volume_id)
        LOG.info(_("volume %s: deleted successfully"), volume_ref['id'])
        self._notify_about_volume_usage(context, volume_ref, "delete.end")
        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)
        self.stats['allocated_capacity_gb'] -= volume_ref['size']
        self.publish_service_capabilities(context)
        return True
    def create_snapshot(self, context, volume_id, snapshot_id):
        """Creates and exports the snapshot."""
        caller_context = context
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        LOG.info(_("snapshot %s: creating"), snapshot_ref['id'])
        self._notify_about_snapshot_usage(
            context, snapshot_ref, "create.start")
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the snapshot status updated.
            utils.require_driver_initialized(self.driver)
            LOG.debug("snapshot %(snap_id)s: creating",
                      {'snap_id': snapshot_ref['id']})
            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            snapshot_ref['context'] = caller_context
            model_update = self.driver.create_snapshot(snapshot_ref)
            if model_update:
                self.db.snapshot_update(context, snapshot_ref['id'],
                                        model_update)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.snapshot_update(context,
                                        snapshot_ref['id'],
                                        {'status': 'error'})
        vol_ref = self.db.volume_get(context, volume_id)
        if vol_ref.bootable:
            try:
                self.db.volume_glance_metadata_copy_to_snapshot(
                    context, snapshot_ref['id'], volume_id)
            except exception.CinderException as ex:
                LOG.exception(_("Failed updating %(snapshot_id)s"
                                " metadata using the provided volumes"
                                " %(volume_id)s metadata") %
                              {'volume_id': volume_id,
                               'snapshot_id': snapshot_id})
                self.db.snapshot_update(context,
                                        snapshot_ref['id'],
                                        {'status': 'error'})
                raise exception.MetadataCopyFailure(reason=ex)
        self.db.snapshot_update(context,
                                snapshot_ref['id'], {'status': 'available',
                                                     'progress': '100%'})
        LOG.info(_("snapshot %s: created successfully"), snapshot_ref['id'])
        self._notify_about_snapshot_usage(context, snapshot_ref, "create.end")
        return snapshot_id
    @locked_snapshot_operation
    def delete_snapshot(self, context, snapshot_id):
        """Deletes and unexports snapshot."""
        caller_context = context
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        project_id = snapshot_ref['project_id']
        LOG.info(_("snapshot %s: deleting"), snapshot_ref['id'])
        self._notify_about_snapshot_usage(
            context, snapshot_ref, "delete.start")
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the snapshot status updated.
            utils.require_driver_initialized(self.driver)
            LOG.debug("snapshot %s: deleting", snapshot_ref['id'])
            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            snapshot_ref['context'] = caller_context
            self.driver.delete_snapshot(snapshot_ref)
        except exception.SnapshotIsBusy:
            LOG.error(_("Cannot delete snapshot %s: snapshot is busy"),
                      snapshot_ref['id'])
            self.db.snapshot_update(context,
                                    snapshot_ref['id'],
                                    {'status': 'available'})
            return True
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.snapshot_update(context,
                                        snapshot_ref['id'],
                                        {'status': 'error_deleting'})
        # Get reservations
        try:
            if CONF.no_snapshot_gb_quota:
                reserve_opts = {'snapshots': -1}
            else:
                reserve_opts = {
                    'snapshots': -1,
                    'gigabytes': -snapshot_ref['volume_size'],
                }
            volume_ref = self.db.volume_get(context, snapshot_ref['volume_id'])
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.get('volume_type_id'))
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_("Failed to update usages deleting snapshot"))
        self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
        self.db.snapshot_destroy(context, snapshot_id)
        LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['id'])
        self._notify_about_snapshot_usage(context, snapshot_ref, "delete.end")
        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)
        return True
    def attach_volume(self, context, volume_id, instance_uuid, host_name,
                      mountpoint, mode):
        """Updates db to show volume is attached."""
        @utils.synchronized(volume_id, external=True)
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            volume_metadata = self.db.volume_admin_metadata_get(
                context.elevated(), volume_id)
            if volume['status'] == 'attaching':
                if (volume['instance_uuid'] and volume['instance_uuid'] !=
                        instance_uuid):
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
                if (volume['attached_host'] and volume['attached_host'] !=
                        host_name):
                    msg = _("being attached by another host")
                    raise exception.InvalidVolume(reason=msg)
                if (volume_metadata.get('attached_mode') and
                        volume_metadata.get('attached_mode') != mode):
                    msg = _("being attached by different mode")
                    raise exception.InvalidVolume(reason=msg)
            elif (not volume['migration_status'] and
                  volume['status'] != "available"):
                msg = _("status must be available or attaching")
                raise exception.InvalidVolume(reason=msg)
            # TODO(jdg): attach_time column is currently varchar
            # we should update this to a date-time object
            # also consider adding detach_time?
            self._notify_about_volume_usage(context, volume,
                                            "attach.start")
            self.db.volume_update(context, volume_id,
                                  {"instance_uuid": instance_uuid,
                                   "attached_host": host_name,
                                   "status": "attaching",
                                   "attach_time": timeutils.strtime()})
            self.db.volume_admin_metadata_update(context.elevated(),
                                                 volume_id,
                                                 {"attached_mode": mode},
                                                 False)
            if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidUUID(uuid=instance_uuid)
            host_name_sanitized = utils.sanitize_hostname(
                host_name) if host_name else None
            volume = self.db.volume_get(context, volume_id)
            if volume_metadata.get('readonly') == 'True' and mode != 'ro':
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidVolumeAttachMode(mode=mode,
                                                        volume_id=volume_id)
            try:
                # NOTE(flaper87): Verify the driver is enabled
                # before going forward. The exception will be caught
                # and the volume status updated.
                utils.require_driver_initialized(self.driver)
                self.driver.attach_volume(context,
                                          volume,
                                          instance_uuid,
                                          host_name_sanitized,
                                          mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context, volume_id,
                                          {'status': 'error_attaching'})
            volume = self.db.volume_attached(context.elevated(),
                                             volume_id,
                                             instance_uuid,
                                             host_name_sanitized,
                                             mountpoint)
            if volume['migration_status']:
                self.db.volume_update(context, volume_id,
                                      {'migration_status': None})
            self._notify_about_volume_usage(context, volume, "attach.end")
        return do_attach()
    @locked_volume_operation
    def detach_volume(self, context, volume_id):
        """Updates db to show volume is detached."""
        # TODO(vish): refactor this into a more general "unreserve"
        volume = self.db.volume_get(context, volume_id)
        self._notify_about_volume_usage(context, volume, "detach.start")
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
            self.driver.detach_volume(context, volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_id,
                                      {'status': 'error_detaching'})
        self.db.volume_detached(context.elevated(), volume_id)
        self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
                                             'attached_mode')
        # NOTE(jdg): We used to do an ensure export here to
        # catch upgrades while volumes were attached (E->F)
        # this was necessary to convert in-use volumes from
        # int ID's to UUID's.  Don't need this any longer
        # We're going to remove the export here
        # (delete the iscsi target)
        volume = self.db.volume_get(context, volume_id)
        try:
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Error detaching volume %(volume)s, "
                                "due to uninitialized driver."),
                              {"volume": volume_id})
        self._notify_about_volume_usage(context, volume, "detach.end")
    def copy_volume_to_image(self, context, volume_id, image_meta):
        """Uploads the specified volume to Glance.
        image_meta is a dictionary containing the following keys:
        'id', 'container_format', 'disk_format'
        """
        payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
        image_service = None
        try:
            volume = self.db.volume_get(context, volume_id)
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
            image_service, image_id = \
                glance.get_remote_image_service(context, image_meta['id'])
            self.driver.copy_volume_to_image(context, volume, image_service,
                                             image_meta)
            LOG.debug("Uploaded volume %(volume_id)s to "
                      "image (%(image_id)s) successfully",
                      {'volume_id': volume_id, 'image_id': image_id})
        except Exception as error:
            LOG.error(_("Error occurred while uploading volume %(volume_id)s "
                        "to image %(image_id)s."),
                      {'volume_id': volume_id, 'image_id': image_meta['id']})
            if image_service is not None:
                # Deletes the image if it is in queued or saving state
                self._delete_image(context, image_meta['id'], image_service)
            with excutils.save_and_reraise_exception():
                payload['message'] = unicode(error)
        finally:
            if (volume['instance_uuid'] is None and
                    volume['attached_host'] is None):
                self.db.volume_update(context, volume_id,
                                      {'status': 'available'})
            else:
                self.db.volume_update(context, volume_id,
                                      {'status': 'in-use'})
    def _delete_image(self, context, image_id, image_service):
        """Deletes an image stuck in queued or saving state."""
        try:
            image_meta = image_service.show(context, image_id)
            image_status = image_meta.get('status')
            if image_status == 'queued' or image_status == 'saving':
                LOG.warn("Deleting image %(image_id)s in %(image_status)s "
                         "state.",
                         {'image_id': image_id,
                          'image_status': image_status})
                image_service.delete(context, image_id)
        except Exception:
            LOG.warn(_("Error occurred while deleting image %s."),
                     image_id, exc_info=True)
    def initialize_connection(self, context, volume_id, connector):
        """Prepare volume for connection from host represented by connector.
        This method calls the driver initialize_connection and returns
        it to the caller.  The connector parameter is a dictionary with
        information about the host that will connect to the volume in the
        following format::
            {
                'ip': ip,
                'initiator': initiator,
            }
        ip: the ip address of the connecting machine
        initiator: the iscsi initiator name of the connecting machine.
        This can be None if the connecting machine does not support iscsi
        connections.
        driver is responsible for doing any necessary security setup and
        returning a connection_info dictionary in the following format::
            {
                'driver_volume_type': driver_volume_type,
                'data': data,
            }
        driver_volume_type: a string to identify the type of volume.  This
                           can be used by the calling code to determine the
                           strategy for connecting to the volume. This could
                           be 'iscsi', 'rbd', 'sheepdog', etc.
        data: this is the data that the calling code will use to connect
              to the volume. Keep in mind that this will be serialized to
              json in various places, so it should not contain any non-json
              data types.
        """
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)
        try:
            self.driver.validate_connector(connector)
        except Exception as err:
            err_msg = (_('Unable to fetch connection information from '
                         'backend: %(err)s') % {'err': err})
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
        volume = self.db.volume_get(context, volume_id)
        model_update = None
        try:
            LOG.debug("Volume %s: creating export", volume_id)
            model_update = self.driver.create_export(context.elevated(),
                                                     volume)
        except exception.CinderException:
            err_msg = (_('Unable to create export for volume %(volume_id)s') %
                       {'volume_id': volume_id})
            LOG.exception(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
        try:
            if model_update:
                volume = self.db.volume_update(context,
                                               volume_id,
                                               model_update)
        except exception.CinderException as ex:
            LOG.exception(_("Failed updating model of volume %(volume_id)s"
                          " with driver provided model %(model)s") %
                          {'volume_id': volume_id, 'model': model_update})
            raise exception.ExportFailure(reason=ex)
        try:
            conn_info = self.driver.initialize_connection(volume, connector)
        except Exception as err:
            err_msg = (_('Unable to fetch connection information from '
                         'backend: %(err)s') % {'err': err})
            LOG.error(err_msg)
            self.driver.remove_export(context.elevated(), volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        # Add qos_specs to connection info
        typeid = volume['volume_type_id']
        specs = None
        if typeid:
            res = volume_types.get_volume_type_qos_specs(typeid)
            qos = res['qos_specs']
            # only pass qos_specs that is designated to be consumed by
            # front-end, or both front-end and back-end.
            if qos and qos.get('consumer') in ['front-end', 'both']:
                specs = qos.get('specs')
        qos_spec = dict(qos_specs=specs)
        conn_info['data'].update(qos_spec)
        # Add access_mode to connection info
        volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
                                                            volume_id)
        if conn_info['data'].get('access_mode') is None:
            access_mode = volume_metadata.get('attached_mode')
            if access_mode is None:
                # NOTE(zhiyan): client didn't call 'os-attach' before
                access_mode = ('ro'
                               if volume_metadata.get('readonly') == 'True'
                               else 'rw')
            conn_info['data']['access_mode'] = access_mode
        # NOTE(skolathur): If volume_type is fibre_channel, invoke
        # FCZoneManager to add access control via FC zoning.
        vol_type = conn_info.get('driver_volume_type', None)
        mode = self.configuration.zoning_mode
        LOG.debug("Zoning Mode: %s", mode)
        if vol_type == 'fibre_channel' and self.zonemanager:
            self._add_or_delete_fc_connection(conn_info, 1)
        return conn_info
    def terminate_connection(self, context, volume_id, connector, force=False):
        """Cleanup connection from host represented by connector.
        The format of connector is the same as for initialize_connection.
        """
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)
        volume_ref = self.db.volume_get(context, volume_id)
        try:
            conn_info = self.driver.terminate_connection(volume_ref,
                                                         connector,
                                                         force=force)
            # NOTE(skolathur): If volume_type is fibre_channel, invoke
            # FCZoneManager to remove access control via FC zoning.
            if conn_info:
                vol_type = conn_info.get('driver_volume_type', None)
                mode = self.configuration.zoning_mode
                LOG.debug("Zoning Mode: %s", mode)
                if vol_type == 'fibre_channel' and self.zonemanager:
                    self._add_or_delete_fc_connection(conn_info, 0)
        except Exception as err:
            err_msg = (_('Unable to terminate volume connection: %(err)s')
                       % {'err': err})
            LOG.error(err_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
        try:
            LOG.debug("volume %s: removing export", volume_id)
            self.driver.remove_export(context.elevated(), volume_ref)
        except Exception as ex:
            LOG.exception(_("Error detaching volume %(volume)s, "
                            "due to remove export failure."),
                          {"volume": volume_id})
            raise exception.RemoveExportException(volume=volume_id, reason=ex)
    def accept_transfer(self, context, volume_id, new_user, new_project):
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)
        # NOTE(jdg): need elevated context as we haven't "given" the vol
        # yet
        volume_ref = self.db.volume_get(context.elevated(), volume_id)
        self.driver.accept_transfer(context, volume_ref, new_user, new_project)
    def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
        rpcapi = volume_rpcapi.VolumeAPI()
        # Create new volume on remote host
        new_vol_values = {}
        for k, v in volume.iteritems():
            new_vol_values[k] = v
        del new_vol_values['id']
        del new_vol_values['_name_id']
        # We don't copy volume_type because the db sets that according to
        # volume_type_id, which we do copy
        del new_vol_values['volume_type']
        if new_type_id:
            new_vol_values['volume_type_id'] = new_type_id
        new_vol_values['host'] = host['host']
        new_vol_values['status'] = 'creating'
        new_vol_values['migration_status'] = 'target:%s' % volume['id']
        new_vol_values['attach_status'] = 'detached'
        new_volume = self.db.volume_create(ctxt, new_vol_values)
        rpcapi.create_volume(ctxt, new_volume, host['host'],
                             None, None, allow_reschedule=False)
        # Wait for new_volume to become ready
        starttime = time.time()
        deadline = starttime + CONF.migration_create_volume_timeout_secs
        new_volume = self.db.volume_get(ctxt, new_volume['id'])
        tries = 0
        while new_volume['status'] != 'available':
            tries = tries + 1
            now = time.time()
            if new_volume['status'] == 'error':
                msg = _("failed to create new_volume on destination host")
                raise exception.VolumeMigrationFailed(reason=msg)
            elif now > deadline:
                msg = _("timeout creating new_volume on destination host")
                raise exception.VolumeMigrationFailed(reason=msg)
            else:
                time.sleep(tries ** 2)
            new_volume = self.db.volume_get(ctxt, new_volume['id'])
        # Copy the source volume to the destination volume
        try:
            if (volume['instance_uuid'] is None and
                    volume['attached_host'] is None):
                self.driver.copy_volume_data(ctxt, volume, new_volume,
                                             remote='dest')
                # The above call is synchronous so we complete the migration
                self.migrate_volume_completion(ctxt, volume['id'],
                                               new_volume['id'], error=False)
            else:
                nova_api = compute.API()
                # This is an async call to Nova, which will call the completion
                # when it's done
                nova_api.update_server_volume(ctxt, volume['instance_uuid'],
                                              volume['id'], new_volume['id'])
        except Exception:
            with excutils.save_and_reraise_exception():
                msg = _("Failed to copy volume %(vol1)s to %(vol2)s")
                LOG.error(msg % {'vol1': volume['id'],
                                 'vol2': new_volume['id']})
                volume = self.db.volume_get(ctxt, volume['id'])
                # If we're in the completing phase don't delete the target
                # because we may have already deleted the source!
                if volume['migration_status'] == 'migrating':
                    rpcapi.delete_volume(ctxt, new_volume)
                new_volume['migration_status'] = None
    def _get_original_status(self, volume):
        if (volume['instance_uuid'] is None and
                volume['attached_host'] is None):
            return 'available'
        else:
            return 'in-use'
    def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
                                  error=False):
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the migration status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(ctxt, volume_id,
                                      {'migration_status': 'error'})
        msg = _("migrate_volume_completion: completing migration for "
                "volume %(vol1)s (temporary volume %(vol2)s")
        LOG.debug(msg % {'vol1': volume_id, 'vol2': new_volume_id})
        volume = self.db.volume_get(ctxt, volume_id)
        new_volume = self.db.volume_get(ctxt, new_volume_id)
        rpcapi = volume_rpcapi.VolumeAPI()
        status_update = None
        if volume['status'] == 'retyping':
            status_update = {'status': self._get_original_status(volume)}
        if error:
            msg = _("migrate_volume_completion is cleaning up an error "
                    "for volume %(vol1)s (temporary volume %(vol2)s")
            LOG.info(msg % {'vol1': volume['id'],
                            'vol2': new_volume['id']})
            new_volume['migration_status'] = None
            rpcapi.delete_volume(ctxt, new_volume)
            updates = {'migration_status': None}
            if status_update:
                updates.update(status_update)
            self.db.volume_update(ctxt, volume_id, updates)
            return volume_id
        self.db.volume_update(ctxt, volume_id,
                              {'migration_status': 'completing'})
        # Delete the source volume (if it fails, don't fail the migration)
        try:
            if status_update and status_update['status'] == 'in-use':
                self.detach_volume(ctxt, volume_id)
            self.delete_volume(ctxt, volume_id)
        except Exception as ex:
            msg = _("Failed to delete migration source vol %(vol)s: %(err)s")
            LOG.error(msg % {'vol': volume_id, 'err': ex})
        self.db.finish_volume_migration(ctxt, volume_id, new_volume_id)
        self.db.volume_destroy(ctxt, new_volume_id)
        if status_update:
            updates = {'migration_status': 'completing'}
            updates.update(status_update)
        else:
            updates = {'migration_status': None}
        self.db.volume_update(ctxt, volume_id, updates)
        if status_update:
            rpcapi.attach_volume(ctxt,
                                 volume,
                                 volume['instance_uuid'],
                                 volume['attached_host'],
                                 volume['mountpoint'],
                                 'rw')
        return volume['id']
    def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
                       new_type_id=None):
        """Migrate the volume to the specified host (called on source host)."""
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the migration status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(ctxt, volume_id,
                                      {'migration_status': 'error'})
        volume_ref = self.db.volume_get(ctxt, volume_id)
        model_update = None
        moved = False
        status_update = None
        if volume_ref['status'] == 'retyping':
            status_update = {'status': self._get_original_status(volume_ref)}
        self.db.volume_update(ctxt, volume_ref['id'],
                              {'migration_status': 'migrating'})
        if not force_host_copy and new_type_id is None:
            try:
                LOG.debug("volume %s: calling driver migrate_volume",
                          volume_ref['id'])
                moved, model_update = self.driver.migrate_volume(ctxt,
                                                                 volume_ref,
                                                                 host)
                if moved:
                    updates = {'host': host['host'],
                               'migration_status': None}
                    if status_update:
                        updates.update(status_update)
                    if model_update:
                        updates.update(model_update)
                    volume_ref = self.db.volume_update(ctxt,
                                                       volume_ref['id'],
                                                       updates)
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {'migration_status': None}
                    if status_update:
                        updates.update(status_update)
                    model_update = self.driver.create_export(ctxt, volume_ref)
                    if model_update:
                        updates.update(model_update)
                    self.db.volume_update(ctxt, volume_ref['id'], updates)
        if not moved:
            try:
                self._migrate_volume_generic(ctxt, volume_ref, host,
                                             new_type_id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {'migration_status': None}
                    if status_update:
                        updates.update(status_update)
                    model_update = self.driver.create_export(ctxt, volume_ref)
                    if model_update:
                        updates.update(model_update)
                    self.db.volume_update(ctxt, volume_ref['id'], updates)
    @periodic_task.periodic_task
    def _report_driver_status(self, context):
        LOG.info(_("Updating volume status"))
        if not self.driver.initialized:
            if self.driver.configuration.config_group is None:
                config_group = ''
            else:
                config_group = ('(config name %s)' %
                                self.driver.configuration.config_group)
            LOG.warning(_('Unable to update stats, %(driver_name)s '
                          '-%(driver_version)s '
                          '%(config_group)s driver is uninitialized.') %
                        {'driver_name': self.driver.__class__.__name__,
                         'driver_version': self.driver.get_version(),
                         'config_group': config_group})
        else:
            volume_stats = self.driver.get_volume_stats(refresh=True)
            if self.extra_capabilities:
                volume_stats.update(self.extra_capabilities)
            if volume_stats:
                # Append volume stats with 'allocated_capacity_gb'
                volume_stats.update(self.stats)
                # queue it to be sent to the Schedulers.
                self.update_service_capabilities(volume_stats)
    def publish_service_capabilities(self, context):
        """Collect driver status and then publish."""
        self._report_driver_status(context)
        self._publish_service_capabilities(context)
    def notification(self, context, event):
        LOG.info(_("Notification {%s} received"), event)
    def _notify_about_volume_usage(self,
                                   context,
                                   volume,
                                   event_suffix,
                                   extra_usage_info=None):
        volume_utils.notify_about_volume_usage(
            context, volume, event_suffix,
            extra_usage_info=extra_usage_info, host=self.host)
    def _notify_about_snapshot_usage(self,
                                     context,
                                     snapshot,
                                     event_suffix,
                                     extra_usage_info=None):
        volume_utils.notify_about_snapshot_usage(
            context, snapshot, event_suffix,
            extra_usage_info=extra_usage_info, host=self.host)
    def extend_volume(self, context, volume_id, new_size, reservations):
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_extending'})
        volume = self.db.volume_get(context, volume_id)
        size_increase = (int(new_size)) - volume['size']
        self._notify_about_volume_usage(context, volume, "resize.start")
        try:
            LOG.info(_("volume %s: extending"), volume['id'])
            self.driver.extend_volume(volume, new_size)
            LOG.info(_("volume %s: extended successfully"), volume['id'])
        except Exception:
            LOG.exception(_("volume %s: Error trying to extend volume"),
                          volume_id)
            try:
                self.db.volume_update(context, volume['id'],
                                      {'status': 'error_extending'})
                raise exception.CinderException(_("Volume %s: Error trying "
                                                  "to extend volume") %
                                                volume_id)
            finally:
                QUOTAS.rollback(context, reservations)
                return
        QUOTAS.commit(context, reservations)
        self.db.volume_update(context, volume['id'], {'size': int(new_size),
                                                      'status': 'available'})
        self.stats['allocated_capacity_gb'] += size_increase
        self._notify_about_volume_usage(
            context, volume, "resize.end",
            extra_usage_info={'size': int(new_size)})
    def retype(self, ctxt, volume_id, new_type_id, host,
               migration_policy='never', reservations=None):
        def _retype_error(context, volume_id, old_reservations,
                          new_reservations, status_update):
            try:
                self.db.volume_update(context, volume_id, status_update)
            finally:
                QUOTAS.rollback(context, old_reservations)
                QUOTAS.rollback(context, new_reservations)
        context = ctxt.elevated()
        volume_ref = self.db.volume_get(ctxt, volume_id)
        status_update = {'status': self._get_original_status(volume_ref)}
        if context.project_id != volume_ref['project_id']:
            project_id = volume_ref['project_id']
        else:
            project_id = context.project_id
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                # NOTE(flaper87): Other exceptions in this method don't
                # set the volume status to error. Should that be done
                # here? Setting the volume back to it's original status
                # for now.
                self.db.volume_update(context, volume_id, status_update)
        # Get old reservations
        try:
            reserve_opts = {'volumes': -1, 'gigabytes': -volume_ref['size']}
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.get('volume_type_id'))
            old_reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
        except Exception:
            old_reservations = None
            self.db.volume_update(context, volume_id, status_update)
            LOG.exception(_("Failed to update usages while retyping volume."))
            raise exception.CinderException(_("Failed to get old volume type"
                                              " quota reservations"))
        # We already got the new reservations
        new_reservations = reservations
        # If volume types have the same contents, no need to do anything
        retyped = False
        diff, all_equal = volume_types.volume_types_diff(
            context, volume_ref.get('volume_type_id'), new_type_id)
        if all_equal:
            retyped = True
        # Call driver to try and change the type
        if not retyped:
            try:
                new_type = volume_types.get_volume_type(context, new_type_id)
                retyped = self.driver.retype(context, volume_ref, new_type,
                                             diff, host)
                if retyped:
                    LOG.info(_("Volume %s: retyped successfully"), volume_id)
            except Exception as ex:
                retyped = False
                LOG.error(_("Volume %s: driver error when trying to retype, "
                            "falling back to generic mechanism."),
                          volume_ref['id'])
                LOG.exception(ex)
        # We could not change the type, so we need to migrate the volume, where
        # the destination volume will be of the new type
        if not retyped:
            if migration_policy == 'never':
                _retype_error(context, volume_id, old_reservations,
                              new_reservations, status_update)
                msg = _("Retype requires migration but is not allowed.")
                raise exception.VolumeMigrationFailed(reason=msg)
            snaps = self.db.snapshot_get_all_for_volume(context,
                                                        volume_ref['id'])
            if snaps:
                _retype_error(context, volume_id, old_reservations,
                              new_reservations, status_update)
                msg = _("Volume must not have snapshots.")
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
            self.db.volume_update(context, volume_ref['id'],
                                  {'migration_status': 'starting'})
            try:
                self.migrate_volume(context, volume_id, host,
                                    new_type_id=new_type_id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    _retype_error(context, volume_id, old_reservations,
                                  new_reservations, status_update)
        else:
            self.db.volume_update(context, volume_id,
                                  {'volume_type_id': new_type_id,
                                   'host': host['host'],
                                   'status': status_update['status']})
        if old_reservations:
            QUOTAS.commit(context, old_reservations, project_id=project_id)
        if new_reservations:
            QUOTAS.commit(context, new_reservations, project_id=project_id)
        self.publish_service_capabilities(context)
    def manage_existing(self, ctxt, volume_id, ref=None):
        LOG.debug('manage_existing: managing %s' % ref)
        try:
            flow_engine = manage_existing.get_flow(
                ctxt,
                self.db,
                self.driver,
                self.host,
                volume_id,
                ref)
        except Exception:
            LOG.exception(_("Failed to create manage_existing flow."))
            raise exception.CinderException(
                _("Failed to create manage existing flow."))
        with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
            flow_engine.run()
        # Fetch created volume from storage
        volume_ref = flow_engine.storage.fetch('volume')
        # Update volume stats
        self.stats['allocated_capacity_gb'] += volume_ref['size']
        return volume_ref['id']
    def _add_or_delete_fc_connection(self, conn_info, zone_op):
        """Add or delete connection control to fibre channel network.
        In case of fibre channel, when zoning mode is set as fabric
        ZoneManager is invoked to apply FC zoning configuration to the network
        using initiator and target WWNs used for attach/detach.
        params conn_info: connector passed by volume driver after
        initialize_connection or terminate_connection.
        params zone_op: Indicates if it is a zone add or delete operation
        zone_op=0 for delete connection and 1 for add connection
        """
        _initiator_target_map = None
        if 'initiator_target_map' in conn_info['data']:
            _initiator_target_map = conn_info['data']['initiator_target_map']
        LOG.debug("Initiator Target map:%s", _initiator_target_map)
        # NOTE(skolathur): Invoke Zonemanager to handle automated FC zone
        # management when vol_type is fibre_channel and zoning_mode is fabric
        # Initiator_target map associating each initiator WWN to one or more
        # target WWN is passed to ZoneManager to add or update zone config.
        LOG.debug("Zoning op: %s", zone_op)
        if _initiator_target_map is not None:
            try:
                if zone_op == 1:
                    self.zonemanager.add_connection(_initiator_target_map)
                elif zone_op == 0:
                    self.zonemanager.delete_connection(_initiator_target_map)
            except exception.ZoneManagerException as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(e)
 | |
| 
	#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
#  spendfrom.py  # Lists available funds
#  spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    n = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    if platform.system() == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    elif platform.system() == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser
    class FakeSecHead(object):
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server"""
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 135894 if testnet else 35894
    connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            bitcoind.walletpassphrase(passphrase, 5)
        except:
            sys.stderr.write("Wrong passphrase\n")
    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    address_summary = dict()
    address_to_account = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    unspent = bitcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        
        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
    return address_summary
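# Shape of the dict returned by list_available() (illustrative values):
#   {"1SomeAddress": {"total": Decimal("1.5"),
#                     "outputs": [<unspent outputs from listunspent>],
#                     "account": "savings"}}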
def select_coins(needed, inputs):
    # Feel free to improve this, this is good enough for my simple needs:
    outputs = []
    have = Decimal("0.0")
    n = 0
    while have < needed and n < len(inputs):
        outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
        have += inputs[n]["amount"]
        n += 1
    return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    all_coins = list_available(bitcoind)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #  
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(bitcoind, txinfo):
    result = Decimal("0.0")
    for vin in txinfo['vin']:
        in_info = bitcoind.getrawtransaction(vin['txid'], 1)
        vout = in_info['vout'][vin['vout']]
        result = result + vout['value']
    return result
def compute_amount_out(txinfo):
    result = Decimal("0.0")
    for vout in txinfo['vout']:
        result = result + vout['value']
    return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        if total_in-total_out > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        fee = total_in - total_out  # fee actually paid by this transaction
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)
    if options.amount is None:
        address_summary = list_available(bitcoind)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)
if __name__ == '__main__':
    main()
 | |
| 
	###############################################################################
##
##  Copyright (C) 2013-2014 Tavendo GmbH
##
##  Licensed under the Apache License, Version 2.0 (the "License");
##  you may not use this file except in compliance with the License.
##  You may obtain a copy of the License at
##
##      http://www.apache.org/licenses/LICENSE-2.0
##
##  Unless required by applicable law or agreed to in writing, software
##  distributed under the License is distributed on an "AS IS" BASIS,
##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##  See the License for the specific language governing permissions and
##  limitations under the License.
##
###############################################################################
from __future__ import absolute_import
class RegisterOptions:
   """
   """
   def __init__(self,
                match = None):
      assert(match is None or (type(match) == str and match in ['exact', 'prefix', 'wildcard']))
      self.match = match
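# Illustrative usage: RegisterOptions(match = 'prefix') requests that the
# registered endpoint be invoked for any procedure URI sharing the given
# prefix (how the options object is passed depends on the session class used).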
class Registration:
   """
   """
   def __init__(self, id, procedure, endpoint):
      self._id = id
      self._procedure = procedure
      self._endpoint = endpoint
      self._isActive = True
class CallOptions:
   """
   Wrapper for providing options to a remote procedure call, specifying
   exactly how the call should be performed.
   """
   def __init__(self,
                onProgress = None,
                timeout = None,
                discloseMe = None,
                runOn = None,
                runMode = None):
      """
      Constructor.
      :param procedure: The URI of the remote procedure to be called, e.g. "com.myapp.hello".
      :type procedure: str
      :param onProgress: A callback that will be called when the remote endpoint
                         called yields interim call progress results.
      :type onProgress: a callable
      :param timeout: Time in seconds after which the call should be automatically cancelled.
      :type timeout: float
      :param discloseMe: Request to disclose the identity of the caller (it's WAMP session ID)
                         to Callees. Note that a Dealer, depending on Dealer configuration, might
                         reject the request, or might disclose the Callee's identity without
                         a request to do so.
      :type discloseMe: bool
      :param runOn: If present (non-None), indicates a distributed call. Distributed calls allows
                    to run a call issued by a Caller on one or more endpoints implementing the
                    called procedure. Permissible value are: "all", "any" and "partition".
                    If `runOne == "partition"`, then `runPartitions` MUST be present.
      :type runOn: str
      """
      assert(onProgress is None or callable(onProgress))
      assert(timeout is None or (type(timeout) in [int, float] and timeout > 0))
      assert(discloseMe is None or type(discloseMe) == bool)
      assert(runOn is None or (type(runOn) == str and runOn in ["all", "any", "partition"]))
      self.onProgress = onProgress
      self.timeout = timeout
      self.discloseMe = discloseMe
      self.runOn = runOn
      self.runMode = runMode
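# Illustrative usage (how the options object is passed to a call depends on
# the session class used):
#   opts = CallOptions(timeout = 10, discloseMe = True)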
class CallResult:
   """
   Wrapper for WAMP remote procedure call results that contain multiple positional
   return values or keyword return values.
   """
   def __init__(self, *results, **kwresults):
      """
      Constructor.
      :param results: The positional result values.
      :type results: list
      :param kwresults: The keyword result values.
      :type kwresults: dict
      """
      self.results = results
      self.kwresults = kwresults
class Invocation:
   """
   """
   def __init__(self, caller = None):
      self.caller = caller
   def progress(self, *args, **kwargs):
      pass
class SubscribeOptions:
   """
   """
   def __init__(self, match = None):
      assert(match is None or (type(match) == str and match in ['exact', 'prefix', 'wildcard']))
      self.match = match
class Subscription:
   """
   """
   def __init__(self, id, topic):
      self._id = id
      self._topic = topic
      self._watchers = []
      self._isActive = True
   def watch(self, watcher):
      """
      Adds a watcher to the subscription.
      If the given watcher is already watching, silently ignore the call. Otherwise
      add the watcher (which must be a callable) to the list of watchers.
      :param watcher: The watcher who should be notified upon receiving events on the
                      given subscription. This must be a callable which will get called
                      with the received Event (carrying topic and payload) whenever an
                      event arrives.
      :type watcher: callable
      """
      assert(self._isActive)
      assert(callable(watcher))
      if not watcher in self._watchers:
         self._watchers.append(watcher)
   def unwatch(self, watcher = None):
      """
      Remove a watcher from the subscription.
      If the given watcher is not watching, silently ignore the call. Otherwise
      remove the watcher from the list of watchers.
      :param watcher: The watcher who should be removed from the list of current watchers
                      or None to remove all watchers.
      :type watcher: callable
      """
      assert(self._isActive)
      if watcher:
         if watcher in self._watchers:
            self._watchers.remove(watcher)
      else:
         self._watchers = []
   def notify(self, event):
      """
      Notify all current watchers of this subscription.
      Watchers are notified in the order in which they were added to this subscription.
      :param event: The event to deliver to the watchers.
      :type event: obj
      """
      assert(self._isActive)
      assert(isinstance(event, Event))
      for watcher in self._watchers:
         watcher(event)
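# Illustrative usage of Subscription watchers (values and names are made up):
#   sub = Subscription(1, "com.myapp.topic1")
#   sub.watch(some_callable)          # will be called as some_callable(event)
#   sub.notify(Event("com.myapp.topic1", {"x": 1}, publication = 42))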
class PublishOptions:
   """
   Wrapper for providing options to a publication, specifying exactly how
   the publishing should be performed.
   """
   def __init__(self,
                excludeMe = None,
                exclude = None,
                eligible = None,
                discloseMe = None):
      """
      Constructor.
      
      :param topic: The URI of the topic to publish to, e.g. "com.myapp.mytopic1".
      :type topic: str
      :param discloseMe: Request to disclose the identity of the caller (it's WAMP session ID)
                         to Callees. Note that a Dealer, depending on Dealer configuration, might
                         reject the request, or might disclose the Callee's identity without
                         a request to do so.
      :type discloseMe: bool
      """
      assert(excludeMe is None or type(excludeMe) == bool)
      assert(exclude is None or (type(exclude) == list and all(type(x) == int for x in exclude)))
      assert(eligible is None or (type(eligible) == list and all(type(x) == int for x in eligible)))
      assert(discloseMe is None or type(discloseMe) == bool)
      self.excludeMe = excludeMe
      self.exclude = exclude
      self.eligible = eligible
      self.discloseMe = discloseMe
class Event:
   def __init__(self, topic, payload, publication, publisher = None):
      self.topic = topic
      self.payload = payload
      self.publication = publication
      self.publisher = publisher
 | |
| 
	
"""Various parsers for Postgres-specific data formats."""
import re
import skytools
__all__ = [
    "parse_pgarray", "parse_logtriga_sql", "parse_tabbed_table",
    "parse_statements", 'sql_tokenizer', 'parse_sqltriga_sql',
    "parse_acl", "dedent", "hsize_to_bytes",
    "parse_connect_string", "merge_connect_string"]
_rc_listelem = re.compile(r'( [^,"}]+ | ["] ( [^"\\]+ | [\\]. )* ["] )', re.X)
def parse_pgarray(array):
    r"""Parse Postgres array and return list of items inside it.
    Examples:
    >>> parse_pgarray('{}')
    []
    >>> parse_pgarray('{a,b,null,"null"}')
    ['a', 'b', None, 'null']
    >>> parse_pgarray(r'{"a,a","b\"b","c\\c"}')
    ['a,a', 'b"b', 'c\\c']
    >>> parse_pgarray("[0,3]={1,2,3}")
    ['1', '2', '3']
    """
    if array is None:
        return None
    if not array or array[0] not in ("{", "[") or array[-1] != '}':
        raise Exception("bad array format: must be surrounded with {}")
    res = []
    pos = 1
    # skip optional dimensions descriptor "[a,b]={...}"
    if array[0] == "[":
        pos = array.find('{') + 1
        if pos < 1:
            raise Exception("bad array format: must be surrounded with {}")
    while 1:
        m = _rc_listelem.search(array, pos)
        if not m:
            break
        pos2 = m.end()
        item = array[pos:pos2]
        if len(item) == 4 and item.upper() == "NULL":
            val = None
        else:
            if len(item) > 0 and item[0] == '"':
                if len(item) == 1 or item[-1] != '"':
                    raise Exception("bad array format: broken '\"'")
                item = item[1:-1]
            val = skytools.unescape(item)
        res.append(val)
        pos = pos2 + 1
        if array[pos2] == "}":
            break
        elif array[pos2] != ",":
            raise Exception("bad array format: expected ,} got " + repr(array[pos2]))
    if pos < len(array) - 1:
        raise Exception("bad array format: failed to parse completely (pos=%d len=%d)" % (pos, len(array)))
    return res
#
# parse logtriga partial sql
#
class _logtriga_parser:
    """Parses logtriga/sqltriga partial SQL to values."""
    def tokenizer(self, sql):
        """Token generator."""
        for typ, tok in sql_tokenizer(sql, ignore_whitespace = True):
            yield tok
    def parse_insert(self, tk, fields, values, key_fields, key_values):
        """Handler for inserts."""
        # (col1, col2) values ('data', null)
        if tk.next() != "(":
            raise Exception("syntax error")
        while 1:
            fields.append(tk.next())
            t = tk.next()
            if t == ")":
                break
            elif t != ",":
                raise Exception("syntax error")
        if tk.next().lower() != "values":
            raise Exception("syntax error, expected VALUES")
        if tk.next() != "(":
            raise Exception("syntax error, expected (")
        while 1:
            values.append(tk.next())
            t = tk.next()
            if t == ")":
                break
            if t == ",":
                continue
            raise Exception("expected , or ) got "+t)
        t = tk.next()
        raise Exception("expected EOF, got " + repr(t))
    def parse_update(self, tk, fields, values, key_fields, key_values):
        """Handler for updates."""
        # col1 = 'data1', col2 = null where pk1 = 'pk1' and pk2 = 'pk2'
        while 1:
            fields.append(tk.next())
            if tk.next() != "=":
                raise Exception("syntax error")
            values.append(tk.next())
            t = tk.next()
            if t == ",":
                continue
            elif t.lower() == "where":
                break
            else:
                raise Exception("syntax error, expected WHERE or , got "+repr(t))
        while 1:
            fld = tk.next()
            key_fields.append(fld)
            self.pklist.append(fld)
            if tk.next() != "=":
                raise Exception("syntax error")
            key_values.append(tk.next())
            t = tk.next()
            if t.lower() != "and":
                raise Exception("syntax error, expected AND got "+repr(t))
    def parse_delete(self, tk, fields, values, key_fields, key_values):
        """Handler for deletes."""
        # pk1 = 'pk1' and pk2 = 'pk2'
        while 1:
            fld = tk.next()
            key_fields.append(fld)
            self.pklist.append(fld)
            if tk.next() != "=":
                raise Exception("syntax error")
            key_values.append(tk.next())
            t = tk.next()
            if t.lower() != "and":
                raise Exception("syntax error, expected AND, got "+repr(t))
    def _create_dbdict(self, fields, values):
        fields = [skytools.unquote_ident(f) for f in fields]
        values = [skytools.unquote_literal(f) for f in values]
        return skytools.dbdict(zip(fields, values))
    def parse_sql(self, op, sql, pklist=None, splitkeys=False):
        """Main entry point."""
        if pklist is None:
            self.pklist = []
        else:
            self.pklist = pklist
        tk = self.tokenizer(sql)
        fields = []
        values = []
        key_fields = []
        key_values = []
        try:
            if op == "I":
                self.parse_insert(tk, fields, values, key_fields, key_values)
            elif op == "U":
                self.parse_update(tk, fields, values, key_fields, key_values)
            elif op == "D":
                self.parse_delete(tk, fields, values, key_fields, key_values)
            # the handlers above never return normally - they either raise or exit
            # via StopIteration - so reaching this point means op was not I/U/D
            raise Exception("syntax error")
        except StopIteration:
            # last sanity check
            if (len(fields) + len(key_fields) == 0 or
                len(fields) != len(values) or
                len(key_fields) != len(key_values)):
                raise Exception("syntax error, fields do not match values")
        if splitkeys:
            return (self._create_dbdict(key_fields, key_values),
                    self._create_dbdict(fields, values))
        return self._create_dbdict(fields + key_fields, values + key_values)
def parse_logtriga_sql(op, sql, splitkeys=False):
    return parse_sqltriga_sql(op, sql, splitkeys=splitkeys)
def parse_sqltriga_sql(op, sql, pklist=None, splitkeys=False):
    """Parse partial SQL used by pgq.sqltriga() back to data values.
    The parser has the following limitations:
     - Expects standard_conforming_strings = off.
     - Does not support dollar quoting.
     - Does not support complex expressions anywhere (e.g. hashtext(col1) = hashtext(val1)).
     - The WHERE expression must not contain IS (NOT) NULL.
     - Does not support updating the pk value, unless you use the splitkeys parameter.
    Returns a dict of col->data pairs.
    Insert event:
    >>> parse_logtriga_sql('I', '(id, data) values (1, null)')
    {'data': None, 'id': '1'}
    Update event:
    >>> parse_logtriga_sql('U', "data='foo' where id = 1")
    {'data': 'foo', 'id': '1'}
    Delete event:
    >>> parse_logtriga_sql('D', "id = 1 and id2 = 'str''val'")
    {'id2': "str'val", 'id': '1'}
    If you set the splitkeys parameter, it will return two dicts, one for key
    fields and one for data fields.
    Insert event:
    >>> parse_logtriga_sql('I', '(id, data) values (1, null)', splitkeys=True)
    ({}, {'data': None, 'id': '1'})
    Update event:
    >>> parse_logtriga_sql('U', "data='foo' where id = 1", splitkeys=True)
    ({'id': '1'}, {'data': 'foo'})
    Delete event:
    >>> parse_logtriga_sql('D', "id = 1 and id2 = 'str''val'", splitkeys=True)
    ({'id2': "str'val", 'id': '1'}, {})
    """
    return _logtriga_parser().parse_sql(op, sql, pklist, splitkeys=splitkeys)
def parse_tabbed_table(txt):
    r"""Parse a tab-separated table into list of dicts.
    Expects the first row to be column names.
    Very primitive.
    Example:
    >>> parse_tabbed_table('col1\tcol2\nval1\tval2\n')
    [{'col2': 'val2', 'col1': 'val1'}]
    """
    txt = txt.replace("\r\n", "\n")
    fields = None
    data = []
    for ln in txt.split("\n"):
        if not ln:
            continue
        if not fields:
            fields = ln.split("\t")
            continue
        cols = ln.split("\t")
        if len(cols) != len(fields):
            continue
        row = dict(zip(fields, cols))
        data.append(row)
    return data
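# Token regexps used by sql_tokenizer() below:
#   _extstr / _stdstr - string literals with backslash escapes vs. standard '' quoting
#   _name / _ident / _fqident - plain or double-quoted identifiers, optionally
#                               schema-qualified
#   _base_sql - remaining token classes: dollar-quoted strings, numbers, $n arguments,
#               %(name)s and {name} placeholders, whitespace/comments, symbols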
_extstr = r""" ['] (?: [^'\\]+ | \\. | [']['] )* ['] """
_stdstr = r""" ['] (?: [^']+ | [']['] )* ['] """
_name = r""" (?: [a-z_][a-z0-9_$]* | " (?: [^"]+ | "" )* " ) """
_ident   = r""" (?P<ident> %s ) """ % _name
_fqident = r""" (?P<ident> %s (?: \. %s )* ) """ % (_name, _name)
_base_sql = r"""
      (?P<dolq>   (?P<dname> [$] (?: [_a-z][_a-z0-9]*)? [$] )
                  .*?
                  (?P=dname) )
    | (?P<num>    [0-9][0-9.e]* )
    | (?P<numarg> [$] [0-9]+ )
    | (?P<pyold>  [%][(] [a-z_][a-z0-9_]* [)] [s] )
    | (?P<pynew>  [{] [^{}]+ [}] )
    | (?P<ws>     (?: \s+ | [/][*] .*? [*][/] | [-][-][^\n]* )+ )
    | (?P<sym>    (?: [-+*~!@#^&|?/%<>=]+ | [,()\[\].:;] ) )
    | (?P<error>  . )"""
_base_sql_fq = r"%s | %s" % (_fqident, _base_sql)
_base_sql    = r"%s | %s" % (_ident, _base_sql)
_std_sql    = r"""(?: (?P<str> [E] %s | %s ) | %s )""" % (_extstr, _stdstr, _base_sql)
_std_sql_fq = r"""(?: (?P<str> [E] %s | %s ) | %s )""" % (_extstr, _stdstr, _base_sql_fq)
_ext_sql    = r"""(?: (?P<str> [E]? %s ) | %s )""" % (_extstr, _base_sql)
_ext_sql_fq = r"""(?: (?P<str> [E]? %s ) | %s )""" % (_extstr, _base_sql_fq)
_std_sql_rc = _ext_sql_rc = None
_std_sql_fq_rc = _ext_sql_fq_rc = None
def sql_tokenizer(sql, standard_quoting = False, ignore_whitespace = False,
                  fqident = False, show_location = False):
    r"""Parse SQL into tokens.
    Iterator; yields (toktype, tokstr) tuples, or (toktype, tokstr, end_pos)
    triples when show_location is set.
    Examples:
    >>> [x for x in sql_tokenizer("select * from a.b", ignore_whitespace=True)]
    [('ident', 'select'), ('sym', '*'), ('ident', 'from'), ('ident', 'a'), ('sym', '.'), ('ident', 'b')]
    >>> [x for x in sql_tokenizer("\"c olumn\",'str''val'")]
    [('ident', '"c olumn"'), ('sym', ','), ('str', "'str''val'")]
    >>> list(sql_tokenizer('a.b a."b "" c" a.1', fqident=True, ignore_whitespace=True))
    [('ident', 'a.b'), ('ident', 'a."b "" c"'), ('ident', 'a'), ('sym', '.'), ('num', '1')]
    """
    global _std_sql_rc, _ext_sql_rc, _std_sql_fq_rc, _ext_sql_fq_rc
    if not _std_sql_rc:
        _std_sql_rc = re.compile(_std_sql, re.X | re.I | re.S)
        _ext_sql_rc = re.compile(_ext_sql, re.X | re.I | re.S)
        _std_sql_fq_rc = re.compile(_std_sql_fq, re.X | re.I | re.S)
        _ext_sql_fq_rc = re.compile(_ext_sql_fq, re.X | re.I | re.S)
    if standard_quoting:
        if fqident:
            rc = _std_sql_fq_rc
        else:
            rc = _std_sql_rc
    else:
        if fqident:
            rc = _ext_sql_fq_rc
        else:
            rc = _ext_sql_rc
    pos = 0
    while 1:
        m = rc.match(sql, pos)
        if not m:
            break
        pos = m.end()
        typ = m.lastgroup
        if ignore_whitespace and typ == "ws":
            continue
        tk = m.group()
        if show_location:
            yield (typ, tk, pos)
        else:
            yield (typ, tk)
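# With show_location=True the tokenizer yields (toktype, tokstr, end_pos) triples;
# an illustrative run:
#   list(sql_tokenizer("select 1", ignore_whitespace=True, show_location=True))
#   -> [('ident', 'select', 6), ('num', '1', 8)]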
_copy_from_stdin_re = r"copy.*from\s+stdin"
_copy_from_stdin_rc = None
def parse_statements(sql, standard_quoting = False):
    """Parse multi-statement string into separate statements.
    Returns list of statements.
    >>> [sql for sql in parse_statements("begin; select 1; select 'foo'; end;")]
    ['begin;', 'select 1;', "select 'foo';", 'end;']
    """
    global _copy_from_stdin_rc
    if not _copy_from_stdin_rc:
        _copy_from_stdin_rc = re.compile(_copy_from_stdin_re, re.X | re.I)
    tokens = []
    pcount = 0 # '(' level
    for typ, t in sql_tokenizer(sql, standard_quoting = standard_quoting):
        # skip whitespace and comments before statement
        if len(tokens) == 0 and typ == "ws":
            continue
        # keep the rest
        tokens.append(t)
        if t == "(":
            pcount += 1
        elif t == ")":
            pcount -= 1
        elif t == ";" and pcount == 0:
            sql = "".join(tokens)
            if _copy_from_stdin_rc.match(sql):
                raise Exception("copy from stdin not supported")
            yield ("".join(tokens))
            tokens = []
    if len(tokens) > 0:
        yield ("".join(tokens))
    if pcount != 0:
        raise Exception("syntax error - unbalanced parenthesis")
_acl_name = r'(?: [0-9a-z_]+ | " (?: [^"]+ | "" )* " )'
_acl_re = r'''
    \s* (?: group \s+ | user \s+ )?
    (?P<tgt> %s )?
    (?P<perm> = [a-z*]*  )?
    (?P<owner> / %s )?
    \s* $
    ''' % (_acl_name, _acl_name)
_acl_rc = None
def parse_acl(acl):
    """Parse ACL entry.
    >>> parse_acl('user=rwx/owner')
    ('user', 'rwx', 'owner')
    >>> parse_acl('" ""user"=rwx/" ""owner"')
    (' "user', 'rwx', ' "owner')
    >>> parse_acl('user=rwx')
    ('user', 'rwx', None)
    >>> parse_acl('=/f')
    (None, '', 'f')
    """
    global _acl_rc
    if not _acl_rc:
        _acl_rc = re.compile(_acl_re, re.I | re.X)
    m = _acl_rc.match(acl)
    if not m:
        return None
    target = m.group('tgt')
    perm = m.group('perm')
    owner = m.group('owner')
    if target:
        target = skytools.unquote_ident(target)
    if perm:
        perm = perm[1:]
    if owner:
        owner = skytools.unquote_ident(owner[1:])
    return (target, perm, owner)
def dedent(doc):
    r"""Relaxed dedent.
    - takes the whitespace to be removed from the first indented line
    - allows empty or non-indented lines at the start
    - allows the first line to be unindented
    - skips empty lines at the start
    - ignores the indent of empty lines
    - if a line does not match the common indent, it stays unchanged
    >>> dedent('  Line1:\n    Line 2\n')
    'Line1:\n  Line 2\n'
    >>> dedent('  \nLine1:\n  Line 2\n Line 3\n    Line 4')
    'Line1:\nLine 2\n Line 3\n  Line 4\n'
    """
    pfx = None
    res = []
    for ln in doc.splitlines():
        ln = ln.rstrip()
        if not pfx and len(res) < 2:
            if not ln:
                continue
            wslen = len(ln) - len(ln.lstrip())
            pfx = ln[ : wslen]
        if pfx:
            if ln.startswith(pfx):
                ln = ln[ len(pfx) : ]
        res.append(ln)
    res.append('')
    return '\n'.join(res)
def hsize_to_bytes (input):
    """ Convert sizes from human format to bytes (string to integer) """
    assert isinstance (input, str)
    m = re.match (r"^([0-9]+) *([KMGTPEZY]?)B?$", input.strip(), re.IGNORECASE)
    if not m: raise ValueError ("cannot parse: %s" % input)
    units = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
    bytes = int(m.group(1)) * 1024 ** units.index(m.group(2).upper())
    return bytes
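# Illustrative conversions (units are powers of 1024, the trailing "B" is optional):
#   hsize_to_bytes('4096')  -> 4096
#   hsize_to_bytes('4 MB')  -> 4194304
#   hsize_to_bytes('1gb')   -> 1073741824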
#
# Connect string parsing
#
_cstr_rx = r""" \s* (\w+) \s* = \s* ( ' ( \\.| [^'\\] )* ' | \S+ ) \s* """
_cstr_unesc_rx = r"\\(.)"
_cstr_badval_rx = r"[\s'\\]"
_cstr_rc = None
_cstr_unesc_rc = None
_cstr_badval_rc = None
def parse_connect_string(cstr):
    r"""Parse Postgres connect string.
    >>> parse_connect_string("host=foo")
    [('host', 'foo')]
    >>> parse_connect_string(r" host = foo password = ' f\\\o\'o ' ")
    [('host', 'foo'), ('password', "' f\\o'o '")]
    """
    global _cstr_rc, _cstr_unesc_rc
    if not _cstr_rc:
        _cstr_rc = re.compile(_cstr_rx, re.X)
        _cstr_unesc_rc = re.compile(_cstr_unesc_rx)
    pos = 0
    res = []
    while pos < len(cstr):
        m = _cstr_rc.match(cstr, pos)
        if not m:
            raise ValueError('Invalid connect string')
        pos = m.end()
        k = m.group(1)
        v = m.group(2)
        if v[0] == "'":
            v = _cstr_unesc_rc.sub(r"\1", v)
        res.append( (k,v) )
    return res
def merge_connect_string(cstr_arg_list):
    """Put fragments back together.
    >>> merge_connect_string([('host', 'ip'), ('pass', ''), ('x', ' ')])
    "host=ip pass='' x=' '"
    """
    global _cstr_badval_rc
    if not _cstr_badval_rc:
        _cstr_badval_rc = re.compile(_cstr_badval_rx)
    buf = []
    for k, v in cstr_arg_list:
        if not v or _cstr_badval_rc.search(v):
            v = v.replace('\\', r'\\')
            v = v.replace("'", r"\'")
            v = "'" + v + "'"
        buf.append("%s=%s" % (k, v))
    return ' '.join(buf)
if __name__ == '__main__':
    import doctest
    doctest.testmod()
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from menpo.transform import (Affine, Similarity, Rotation, Scale,
                             NonUniformScale, UniformScale, Translation,
                             Homogeneous)
from nose.tools import raises
@raises(ValueError)
def test_1d_translation():
    t_vec = np.array([1])
    Translation(t_vec)
@raises(ValueError)
def test_5d_translation():
    t_vec = np.ones(5)
    Translation(t_vec)
def test_translation():
    t_vec = np.array([1, 2, 3])
    starting_vector = np.random.rand(10, 3)
    transform = Translation(t_vec)
    transformed = transform.apply(starting_vector)
    assert_allclose(starting_vector + t_vec, transformed)
def test_basic_2d_rotation():
    rotation_matrix = np.array([[0, 1],
                                [-1, 0]])
    rotation = Rotation(rotation_matrix)
    assert_allclose(np.array([0, -1]), rotation.apply(np.array([1, 0])))
def test_basic_2d_rotation_axis_angle():
    rotation_matrix = np.array([[0, 1],
                                [-1, 0]])
    rotation = Rotation(rotation_matrix)
    axis, angle = rotation.axis_and_angle_of_rotation()
    assert_allclose(axis, np.array([0, 0, 1]))
    assert_allclose((90 * np.pi)/180, angle)
def test_basic_3d_rotation():
    a = np.sqrt(3.0)/2.0
    b = 0.5
    # this is a rotation of -30 degrees about the x axis
    rotation_matrix = np.array([[1, 0, 0],
                                [0, a, b],
                                [0, -b, a]])
    rotation = Rotation(rotation_matrix)
    starting_vector = np.array([0, 1, 0])
    transformed = rotation.apply(starting_vector)
    assert_allclose(np.array([0, a, -b]), transformed)
def test_basic_3d_rotation_axis_angle():
    a = np.sqrt(3.0)/2.0
    b = 0.5
    # this is a rotation of -30 degrees about the x axis
    rotation_matrix = np.array([[1, 0, 0],
                                [0, a, b],
                                [0, -b, a]])
    rotation = Rotation(rotation_matrix)
    axis, angle = rotation.axis_and_angle_of_rotation()
    assert_allclose(axis, np.array([1, 0, 0]))
    assert_allclose((-30 * np.pi)/180, angle)
def test_3d_rotation_inverse_eye():
    a = np.sqrt(3.0)/2.0
    b = 0.5
    # this is a rotation of -30 degrees about the x axis
    rotation_matrix = np.array([[1, 0, 0],
                                [0, a, b],
                                [0, -b, a]])
    rotation = Rotation(rotation_matrix)
    transformed = rotation.compose_before(rotation.pseudoinverse())
    assert_allclose(np.eye(4), transformed.h_matrix, atol=1e-15)
def test_basic_2d_affine():
    linear_component = np.array([[1, -6],
                                 [-3, 2]])
    translation_component = np.array([7, -8])
    h_matrix = np.eye(3, 3)
    h_matrix[:-1, :-1] = linear_component
    h_matrix[:-1, -1] = translation_component
    affine = Affine(h_matrix)
    x = np.array([[0, 1],
                  [1, 1],
                  [-1, -5],
                  [3, -5]])
    # transform x explicitly
    solution = np.dot(x, linear_component.T) + translation_component
    # transform x using the affine transform
    result = affine.apply(x)
    # check that both answers are equivalent
    assert_allclose(solution, result)
    # create several copies of x
    x_copies = np.array([x, x, x, x, x, x, x, x])
    # transform all of copies at once using the affine transform
    results = affine.apply(x_copies)
    # check that all copies have been transformed correctly
    for r in results:
        assert_allclose(solution, r)
def test_basic_3d_affine():
    linear_component = np.array([[1, 6, -4],
                                 [-3, -2, 5],
                                 [5, -1, 3]])
    translation_component = np.array([7, -8, 9])
    h_matrix = np.eye(4, 4)
    h_matrix[:-1, :-1] = linear_component
    h_matrix[:-1, -1] = translation_component
    affine = Affine(h_matrix)
    x = np.array([[0, 1,  2],
                  [1, 1, 1],
                  [-1, 2, -5],
                  [1, -5, -1]])
    # transform x explicitly
    solution = np.dot(x, linear_component.T) + translation_component
    # transform x using the affine transform
    result = affine.apply(x)
    # check that both answers are equivalent
    assert_allclose(solution, result)
    # create several copies of x
    x_copies = np.array([x, x, x, x, x, x, x, x])
    # transform all of copies at once using the affine transform
    results = affine.apply(x_copies)
    # check that all copies have been transformed correctly
    for r in results:
        assert_allclose(solution, r)
def test_basic_2d_similarity():
    linear_component = np.array([[2, -6],
                                 [6, 2]])
    translation_component = np.array([7, -8])
    h_matrix = np.eye(3, 3)
    h_matrix[:-1, :-1] = linear_component
    h_matrix[:-1, -1] = translation_component
    similarity = Similarity(h_matrix)
    x = np.array([[0, 1],
                  [1, 1],
                  [-1, -5],
                  [3, -5]])
    # transform x explicitly
    solution = np.dot(x, linear_component.T) + translation_component
    # transform x using the affine transform
    result = similarity.apply(x)
    # check that both answers are equivalent
    assert_allclose(solution, result)
    # create several copies of x
    x_copies = np.array([x, x, x, x, x, x, x, x])
    # transform all of copies at once using the affine transform
    results = similarity.apply(x_copies)
    # check that all copies have been transformed correctly
    for r in results:
        assert_allclose(solution, r)
def test_similarity_2d_from_vector():
    params = np.array([0.2, 0.1, 1, 2])
    homo = np.array([[params[0] + 1, -params[1], params[2]],
                     [params[1], params[0] + 1, params[3]],
                     [0, 0, 1]])
    sim = Similarity.init_identity(2).from_vector(params)
    assert_equal(sim.h_matrix, homo)
def test_similarity_2d_as_vector():
    params = np.array([0.2, 0.1, 1.0, 2.0])
    homo = np.array([[params[0] + 1.0, -params[1], params[2]],
                     [params[1], params[0] + 1.0, params[3]],
                     [0.0, 0.0, 1.0]])
    vec = Similarity(homo).as_vector()
    assert_allclose(vec, params)
def test_translation_2d_from_vector():
    params = np.array([1, 2])
    homo = np.array([[1, 0, params[0]],
                     [0, 1, params[1]],
                     [0, 0, 1]])
    tr = Translation.init_identity(2).from_vector(params)
    assert_equal(tr.h_matrix, homo)
def test_translation_2d_as_vector():
    params = np.array([1, 2])
    vec = Translation(params).as_vector()
    assert_allclose(vec, params)
def test_translation_3d_from_vector():
    params = np.array([1, 2, 3])
    homo = np.array([[1, 0, 0, params[0]],
                     [0, 1, 0, params[1]],
                     [0, 0, 1, params[2]],
                     [0, 0, 0, 1]])
    tr = Translation.init_identity(3).from_vector(params)
    assert_equal(tr.h_matrix, homo)
def test_translation_3d_as_vector():
    params = np.array([1, 2, 3])
    vec = Translation(params).as_vector()
    assert_allclose(vec, params)
def test_uniformscale2d_update_from_vector():
    # make a uniform scale of 1, 2 dimensional
    uniform_scale = UniformScale(1, 2)
    new_scale = 2
    homo = np.array([[new_scale, 0, 0],
                     [0, new_scale, 0],
                     [0, 0, 1]])
    uniform_scale.from_vector_inplace(new_scale)
    assert_equal(uniform_scale.h_matrix, homo)
def test_uniformscale2d_as_vector():
    scale = 2
    vec = UniformScale(scale, 2).as_vector()
    assert_allclose(vec, scale)
def test_nonuniformscale2d_from_vector():
    scale = np.array([1, 2])
    homo = np.array([[scale[0], 0, 0],
                     [0, scale[1], 0],
                     [0, 0, 1]])
    tr = NonUniformScale.init_identity(2).from_vector(scale)
    assert_equal(tr.h_matrix, homo)
def test_nonuniformscale2d_update_from_vector():
    scale = np.array([3, 4])
    homo = np.array([[scale[0], 0, 0],
                     [0, scale[1], 0],
                     [0, 0, 1]])
    tr = NonUniformScale(np.array([1, 2]))
    tr.from_vector_inplace(scale)
    assert_equal(tr.h_matrix, homo)
def test_nonuniformscale2d_as_vector():
    scale = np.array([1, 2])
    vec = NonUniformScale(scale).as_vector()
    assert_allclose(vec, scale)
def test_uniformscale3d_from_vector():
    scale = 2
    homo = np.array([[scale, 0, 0, 0],
                     [0, scale, 0, 0],
                     [0, 0, scale, 0],
                     [0, 0, 0, 1]])
    uniform_scale = UniformScale(1, 3)
    tr = uniform_scale.from_vector(scale)
    assert_equal(tr.h_matrix, homo)
def test_uniformscale3d_as_vector():
    scale = 2
    vec = UniformScale(scale, 3).as_vector()
    assert_allclose(vec, scale)
def test_uniformscale_build_2d():
    scale = 2
    homo = np.array([[scale, 0, 0],
                     [0, scale, 0],
                     [0, 0, 1]])
    tr = UniformScale(scale, 2)
    assert_equal(tr.h_matrix, homo)
def test_uniformscale_build_3d():
    scale = 2
    homo = np.array([[scale, 0, 0, 0],
                     [0, scale, 0, 0],
                     [0, 0, scale, 0],
                     [0, 0, 0, 1]])
    tr = UniformScale(scale, 3)
    assert(isinstance(tr, UniformScale))
    assert_equal(tr.h_matrix, homo)
@raises(ValueError)
def test_uniformscale_build_4d_raise_dimensionalityerror():
    UniformScale(1, 4)
def test_scale_build_2d_uniform_pass_dim():
    scale = 2
    ndim = 2
    tr = Scale(scale, ndim)
    assert(isinstance(tr, UniformScale))
def test_scale_build_3d_uniform_pass_dim():
    scale = 2
    ndim = 3
    tr = Scale(scale, ndim)
    assert(isinstance(tr, UniformScale))
def test_scale_build_2d_nonuniform():
    scale = np.array([1, 2])
    tr = Scale(scale)
    assert(isinstance(tr, NonUniformScale))
def test_scale_build_2d_uniform_from_vec():
    scale = np.array([2, 2])
    tr = Scale(scale)
    assert(isinstance(tr, UniformScale))
@raises(ValueError)
def test_scale_zero_scale_raise_valuerror():
    Scale(np.array([1, 0]))
# Vectorizable interface tests
@raises(NotImplementedError)
def test_rotation3d_from_vector_raises_notimplementederror():
    Rotation.init_identity(3).from_vector(0)
@raises(NotImplementedError)
def test_rotation3d_as_vector_raises_notimplementederror():
    Rotation.init_identity(3).as_vector()
def test_affine_2d_n_parameters():
    homo = np.eye(3)
    t = Affine(homo)
    assert(t.n_parameters == 6)
def test_affine_2d_n_dims_output():
    homo = np.eye(3)
    t = Affine(homo)
    assert(t.n_dims_output == 2)
def test_affine_3d_n_parameters():
    homo = np.eye(4)
    t = Affine(homo)
    assert(t.n_parameters == 12)
def test_similarity_2d_n_parameters():
    homo = np.eye(3)
    t = Similarity(homo)
    assert(t.n_parameters == 4)
@raises(NotImplementedError)
def test_similarity_3d_n_parameters_raises_notimplementederror():
    homo = np.eye(4)
    t = Similarity(homo)
    # Raises exception
    t.n_parameters
def test_uniformscale2d_n_parameters():
    scale = 2
    t = UniformScale(scale, 2)
    assert(t.n_parameters == 1)
def test_uniformscale3d_n_parameters():
    scale = 2
    t = UniformScale(scale, 3)
    assert(t.n_parameters == 1)
def test_nonuniformscale_2d_n_parameters():
    scale = np.array([1, 2])
    t = NonUniformScale(scale)
    assert(t.n_parameters == 2)
def test_translation_2d_n_parameters():
    trans = np.array([1, 2])
    t = Translation(trans)
    assert(t.n_parameters == 2)
def test_translation_3d_n_parameters():
    trans = np.array([1, 2, 3])
    t = Translation(trans)
    assert(t.n_parameters == 3)
@raises(NotImplementedError)
def test_rotation2d_n_parameters_raises_notimplementederror():
    rot_matrix = np.eye(2)
    t = Rotation(rot_matrix)
    t.n_parameters
@raises(NotImplementedError)
def test_rotation3d_n_parameters_raises_notimplementederror():
    rot_matrix = np.eye(3)
    t = Rotation(rot_matrix)
    # Throws exception
    t.n_parameters
# Test list construction is equivalent to ndarray construction
def test_translation_from_list():
    t_a = Translation([3, 4])
    t_b = Translation(np.array([3, 4]))
    assert(np.all(t_a.h_matrix == t_b.h_matrix))
def test_nonuniformscale_from_list():
    u_a = NonUniformScale([3, 2, 3])
    u_b = NonUniformScale(np.array([3, 2, 3]))
    assert(np.all(u_a.h_matrix == u_b.h_matrix))
# Test set_h_matrix is not allowed on similarity subclasses + uniformscale
@raises(NotImplementedError)
def test_similarity_set_h_matrix_raises_notimplementederror():
    s = Similarity(np.eye(3))
    s.set_h_matrix(s.h_matrix)
@raises(NotImplementedError)
def test_translation_set_h_matrix_raises_notimplementederror():
    t = Translation([3, 4])
    t.set_h_matrix(t.h_matrix)
@raises(NotImplementedError)
def test_rotation_set_h_matrix_raises_notimplementederror():
    r = Rotation(np.array([[1, 0], [0, 1]]))
    r.set_h_matrix(r.h_matrix)
@raises(NotImplementedError)
def test_uniformscale_set_h_matrix_raises_notimplementederror():
    s = UniformScale(2, 3)
    s.set_h_matrix(s.h_matrix)
@raises(NotImplementedError)
def test_nonuniformscale_set_h_matrix_raises_notimplementederror():
    s = NonUniformScale([2, 3, 4])
    s.set_h_matrix(s.h_matrix)
def test_homogeneous_print():
    e = np.eye(3)
    h = Homogeneous(e)
    assert(str(h) == 'Homogeneous\n[[ 1.  0.  0.]\n [ 0.  1.  0.]'
                     '\n [ 0.  0.  1.]]')
def test_homogeneous_eye():
    e = np.eye(3)
    h = Homogeneous.init_identity(2)
    assert_allclose(e, h.h_matrix)
def test_homogeneous_has_true_inverse():
    h = Homogeneous.init_identity(2)
    assert h.has_true_inverse
def test_homogeneous_inverse():
    e = np.eye(3) * 2
    e[2, 2] = 1
    e_inv = np.eye(3) * 0.5
    e_inv[2, 2] = 1
    h = Homogeneous(e)
    assert_allclose(h.pseudoinverse().h_matrix, e_inv)
def test_homogeneous_apply():
    e = np.eye(3) * 2
    p = np.random.rand(10, 2)
    e[2, 2] = 1
    e[:2, -1] = [2, 3]
    h = Homogeneous(e)
    p_applied = h.apply(p)
    p_manual = p * 2 + np.array([2, 3])
    assert_allclose(p_applied, p_manual)
def test_homogeneous_apply_batched():
    e = np.eye(3) * 2
    p = np.random.rand(10, 2)
    e[2, 2] = 1
    e[:2, -1] = [2, 3]
    h = Homogeneous(e)
    p_applied = h.apply(p, batch_size=2)
    p_manual = p * 2 + np.array([2, 3])
    assert_allclose(p_applied, p_manual)
def test_homogeneous_as_vector():
    e = np.eye(3) * 2
    e[2, 2] = 1
    h = Homogeneous(e)
    assert_allclose(h.as_vector(), e.flatten())
def test_homogeneous_from_vector_inplace():
    h = Homogeneous(np.eye(3))
    e = np.eye(3) * 2
    e[2, 2] = 1
    h.from_vector_inplace(e.ravel())
    assert_allclose(h.h_matrix, e)