# Generated by Django 3.1.2 on 2021-03-22 18:40

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('website', '0009_auto_20210322_1837'),
    ]

    operations = [
        migrations.AlterField(
            model_name='banner',
            name='color',
            field=models.CharField(default='hs-yellow', help_text='Bakgrunnsfargen til banneret som en hex-farge. hs-green, hs-yellow og hs-red støttes også som presets.', max_length=10, verbose_name='bannercolor'),
        ),
        migrations.AlterField(
            model_name='banner',
            name='site',
            field=models.CharField(default='*', help_text="Det interne navnet på URL-stien til sidene som banneret skal dukke opp på. Wildcard (*) støttes. F.eks. er '*' ALLE sider, 'inventory:*' er alle lagersider.", max_length=250, verbose_name='bannersider'),
        ),
        migrations.AlterField(
            model_name='banner',
            name='text',
            field=models.TextField(default='Sample Text', help_text='Tekst som vises i banneret.', max_length=1000, verbose_name='bannertext'),
        ),
        migrations.AlterField(
            model_name='banner',
            name='text_color',
            field=models.CharField(default='hs-black', help_text='Tekstfargen på banneret. hs-white og hs-black støttes som presets.', max_length=10, verbose_name='bannertextcolor'),
        ),
    ]
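For readers who don't have the app's models.py at hand, here is a hedged sketch of what the Banner model might look like after this migration. The field names, defaults, max lengths and verbose names come from the AlterField operations above; the English help texts are translations of the Norwegian originals, and the module placement is an assumption.

# Hypothetical reconstruction of website/models.py after migration 0010.
# Only the four altered fields are shown; anything else on Banner is unknown.
from django.db import models


class Banner(models.Model):
    color = models.CharField(
        default='hs-yellow', max_length=10, verbose_name='bannercolor',
        help_text='Background color of the banner as a hex color; hs-green, hs-yellow and hs-red are also supported as presets.')
    site = models.CharField(
        default='*', max_length=250, verbose_name='bannersider',
        help_text="Internal name of the URL path the banner appears on; wildcards are supported, e.g. '*' is ALL pages and 'inventory:*' is all inventory pages.")
    text = models.TextField(
        default='Sample Text', max_length=1000, verbose_name='bannertext',
        help_text='Text shown in the banner.')
    text_color = models.CharField(
        default='hs-black', max_length=10, verbose_name='bannertextcolor',
        help_text='Text color of the banner; hs-white and hs-black are supported as presets.')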
West LA Dem Club’s monthly General Meeting featured national author David Dayen discussing his new book “Chain of Title,” an investigative account of the fraud committed by banks millions of times over as they foreclosed on American homeowners. Excellent story of how this massive fraud started in FL. It takes courage to do the jobs of Lisa, Michael, and Lynn. Thanks for detailing their stories, and what caused The Big Short, 99 Homes, and others. Now how do we fix this ongoing wave of “fraudclosure”? I listened to his story about people who lost their jobs, but what about the people who paid their mortgage, made all their payments on time, and still had their homes stolen?
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for stochastic graphs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf st = tf.contrib.bayesflow.stochastic_tensor sg = tf.contrib.bayesflow.stochastic_graph distributions = tf.contrib.distributions class NormalNotParam(distributions.Normal): @property def is_reparameterized(self): return False class TestSurrogateLosses(tf.test.TestCase): def testPathwiseDerivativeDoesNotAddSurrogateLosses(self): with self.test_session(): mu = [0.0, 0.1, 0.2] sigma = tf.constant([1.1, 1.2, 1.3]) with st.value_type(st.SampleAndReshapeValue()): prior = st.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma) likelihood = st.StochasticTensor( distributions.Normal, mu=prior, sigma=sigma) self.assertTrue(prior.distribution.is_reparameterized) self.assertTrue(likelihood.distribution.is_reparameterized) loss = tf.square(tf.identity(likelihood) - [0.0, 0.1, 0.2]) sum_loss = tf.reduce_sum(loss) surrogate_loss = sg.surrogate_loss([loss]) with self.assertRaisesRegexp(ValueError, "dimensionality 1 or greater"): _ = sg.surrogate_loss([sum_loss]) surrogate_from_both = sg.surrogate_loss( [loss, sum_loss * tf.ones_like(loss)]) # Pathwise derivative terms do not require add'l surrogate loss terms. 
with self.test_session() as sess: self.assertAllClose(*sess.run([loss, surrogate_loss])) self.assertAllClose(*sess.run([(loss + sum_loss), surrogate_from_both])) def _testSurrogateLoss(self, session, losses, expected_addl_terms, xs): surrogate_loss = sg.surrogate_loss(losses) expected_surrogate_loss = tf.add_n(losses + expected_addl_terms) self.assertAllClose(*session.run([surrogate_loss, expected_surrogate_loss])) # Test backprop expected_grads = tf.gradients(ys=expected_surrogate_loss, xs=xs) surrogate_grads = tf.gradients(ys=surrogate_loss, xs=xs) self.assertEqual(len(expected_grads), len(surrogate_grads)) grad_values = session.run(expected_grads + surrogate_grads) n_grad = len(expected_grads) self.assertAllClose(grad_values[:n_grad], grad_values[n_grad:]) def testSurrogateLoss(self): with self.test_session() as sess: mu = tf.constant([0.0, 0.1, 0.2]) sigma = tf.constant([1.1, 1.2, 1.3]) with st.value_type(st.SampleAndReshapeValue()): prior = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma) likelihood = st.StochasticTensor( NormalNotParam, mu=prior, sigma=sigma) prior_2 = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma) loss = tf.square(tf.identity(likelihood) - mu) part_loss = tf.square(tf.identity(prior) - mu) sum_loss = tf.reduce_sum(loss) loss_nodeps = tf.square(tf.identity(prior_2) - mu) # For ground truth, use the stop-gradient versions of the losses loss_nograd = tf.stop_gradient(loss) loss_nodeps_nograd = tf.stop_gradient(loss_nodeps) sum_loss_nograd = tf.stop_gradient(sum_loss) # These score functions should ignore prior_2 self._testSurrogateLoss( session=sess, losses=[loss], expected_addl_terms=[ likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd, prior.distribution.log_pdf(prior.value()) * loss_nograd], xs=[mu, sigma]) self._testSurrogateLoss( session=sess, losses=[loss, part_loss], expected_addl_terms=[ likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd, (prior.distribution.log_pdf(prior.value()) * tf.stop_gradient(part_loss + loss))], xs=[mu, sigma]) self._testSurrogateLoss( session=sess, losses=[sum_loss * tf.ones_like(loss)], expected_addl_terms=[ (likelihood.distribution.log_pdf(likelihood.value()) * sum_loss_nograd), prior.distribution.log_pdf(prior.value()) * sum_loss_nograd], xs=[mu, sigma]) self._testSurrogateLoss( session=sess, losses=[loss, sum_loss * tf.ones_like(loss)], expected_addl_terms=[ (likelihood.distribution.log_pdf(likelihood.value()) * tf.stop_gradient(loss + sum_loss)), (prior.distribution.log_pdf(prior.value()) * tf.stop_gradient(loss + sum_loss))], xs=[mu, sigma]) # These score functions should ignore prior and likelihood self._testSurrogateLoss( session=sess, losses=[loss_nodeps], expected_addl_terms=[(prior_2.distribution.log_pdf(prior_2.value()) * loss_nodeps_nograd)], xs=[mu, sigma]) # These score functions should include all terms selectively self._testSurrogateLoss( session=sess, losses=[loss, loss_nodeps], # We can't guarantee ordering of output losses in this case. 
expected_addl_terms=[ (likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd), prior.distribution.log_pdf(prior.value()) * loss_nograd, (prior_2.distribution.log_pdf(prior_2.value()) * loss_nodeps_nograd)], xs=[mu, sigma]) def testNoSurrogateLoss(self): with self.test_session(): mu = tf.constant([0.0, 0.1, 0.2]) sigma = tf.constant([1.1, 1.2, 1.3]) with st.value_type(st.SampleAndReshapeValue()): dt = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma, loss_fn=None) self.assertEqual(None, dt.loss(tf.constant([2.0]))) def testExplicitStochasticTensors(self): with self.test_session() as sess: mu = tf.constant([0.0, 0.1, 0.2]) sigma = tf.constant([1.1, 1.2, 1.3]) with st.value_type(st.SampleAndReshapeValue()): dt1 = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma) dt2 = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma) loss = tf.square(tf.identity(dt1)) + 10. + dt2 sl_all = sg.surrogate_loss([loss]) sl_dt1 = sg.surrogate_loss([loss], stochastic_tensors=[dt1]) sl_dt2 = sg.surrogate_loss([loss], stochastic_tensors=[dt2]) dt1_term = dt1.distribution.log_pdf(dt1) * loss dt2_term = dt2.distribution.log_pdf(dt2) * loss self.assertAllClose(*sess.run( [sl_all, sum([loss, dt1_term, dt2_term])])) self.assertAllClose(*sess.run([sl_dt1, sum([loss, dt1_term])])) self.assertAllClose(*sess.run([sl_dt2, sum([loss, dt2_term])])) class StochasticDependenciesMapTest(tf.test.TestCase): def testBuildsMapOfUpstreamNodes(self): dt1 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.) dt2 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.) out1 = dt1.value() + 1. out2 = dt2.value() + 2. x = out1 + out2 y = out2 * 3. dep_map = sg._stochastic_dependencies_map([x, y]) self.assertEqual(dep_map[dt1], set([x])) self.assertEqual(dep_map[dt2], set([x, y])) def testHandlesStackedStochasticNodes(self): dt1 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.) out1 = dt1.value() + 1. dt2 = st.StochasticTensor(distributions.Normal, mu=out1, sigma=1.) x = dt2.value() + 2. dt3 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.) y = dt3.value() * 3. dep_map = sg._stochastic_dependencies_map([x, y]) self.assertEqual(dep_map[dt1], set([x])) self.assertEqual(dep_map[dt2], set([x])) self.assertEqual(dep_map[dt3], set([y])) def testTraversesControlInputs(self): dt1 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.) logits = dt1.value() * 3. dt2 = st.StochasticTensor(distributions.Bernoulli, logits=logits) dt3 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.) x = dt3.value() y = tf.ones((2, 2)) * 4. z = tf.ones((2, 2)) * 3. out = tf.cond( tf.cast(dt2, tf.bool), lambda: tf.add(x, y), lambda: tf.square(z)) out += 5. dep_map = sg._stochastic_dependencies_map([out]) self.assertEqual(dep_map[dt1], set([out])) self.assertEqual(dep_map[dt2], set([out])) self.assertEqual(dep_map[dt3], set([out])) if __name__ == "__main__": tf.test.main()
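The tests above exercise the score-function surrogate loss from the old tf.contrib.bayesflow API: for every loss that depends on a non-reparameterized stochastic tensor, surrogate_loss adds a term of the form log_pdf(sample) * stop_gradient(loss). Below is a minimal NumPy sketch of that construction; the helper names are illustrative and not part of TensorFlow, and since NumPy has no gradient tape, the stop_gradient detail is only noted in a comment.

# Minimal sketch of the score-function ("REINFORCE") surrogate loss idea
# that these tests verify, written with plain NumPy so it runs without
# the deprecated tf.contrib.bayesflow API. Names are illustrative only.
import numpy as np


def normal_log_pdf(x, mu, sigma):
    """Elementwise log-density of N(mu, sigma^2)."""
    return -0.5 * np.log(2 * np.pi * sigma ** 2) - (x - mu) ** 2 / (2 * sigma ** 2)


def make_surrogate(loss, samples_and_params):
    """Return loss + sum_i log p(sample_i) * loss.

    `samples_and_params` holds (sample, mu, sigma) tuples for the
    non-reparameterized stochastic tensors the loss depends on. In real
    TensorFlow the second factor is wrapped in tf.stop_gradient so only
    the log-probability term contributes gradients.
    """
    extra_terms = [normal_log_pdf(s, mu, sigma) * loss
                   for s, mu, sigma in samples_and_params]
    return loss + sum(extra_terms)


mu = np.array([0.0, 0.1, 0.2])
sigma = np.array([1.1, 1.2, 1.3])
sample = np.random.normal(mu, sigma)
loss = (sample - mu) ** 2
print(make_surrogate(loss, [(sample, mu, sigma)]))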
Greg and Katie Pierce's boys, Asher and Keegan, are one year old today! Happy Birthday! I've thoroughly enjoyed following their progress over the last 12 months. Here's to many more!
''' MetaFunctions is a function composition and data pipelining library. For more information, please visit the `project on github <https://github.com/ForeverWintr/metafunctions>`_. ''' import os import sys import contextlib import pathlib import shutil from setuptools import setup, find_packages, Command import metafunctions here = os.path.abspath(os.path.dirname(__file__)) class UploadCommand(Command): """ Support setup.py upload. https://github.com/kennethreitz/setup.py/blob/master/setup.py """ description = 'Build and publish the package.' user_options = [] @staticmethod def status(s): """Prints things in bold.""" print('\033[1m{0}\033[0m'.format(s)) def initialize_options(self): pass def finalize_options(self): pass def run(self): try: self.status('Removing previous builds…') shutil.rmtree(os.path.join(here, 'dist')) except OSError: pass self.status('Building Source and Wheel (universal) distribution…') os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable)) self.status('Uploading the package to PyPi via Twine…') os.system('twine upload dist/*') sys.exit() setup( name=metafunctions.__name__, version=metafunctions.__version__, description='Metafunctions is a function composition and data pipelining library', long_description=__doc__, url='https://github.com/ForeverWintr/metafunctions', author='Tom Rutherford', author_email='[email protected]', license='MIT', # See https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: Software Development :: Libraries :: Python Modules', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', ], keywords='functional-programming function-composition', packages=find_packages(), test_suite='metafunctions.tests', install_requires='ansicolors>=1.1.8', # $ setup.py publish support. cmdclass={ 'upload': UploadCommand, }, )
Atkinson Graphics is a family-owned & operated, full service Custom Apparel & Screen Printing Company located in southeastern NH. We specialize in custom t-shirt design and printing! For over 20 years, we’ve been putting sweet threads on our customers’ backs and smiles on their faces! By offering a large selection of apparel & products, combined with advanced on-site printing & embroidery equipment, highly skilled employees, and a full in-house graphic design department, we make it easy and affordable to create eye-catching promotional products. Atkinson Graphics has the ability to print any size order. Printing in one to eight colors on fully automated equipment allows us to get the product to our customers faster and more cost-effectively. This automated process has several advantages, one being consistency: we strive to ensure that your order is printed the same way, with the same specifications, every time, on time! Printing for customers for 20 years! I’ve been working with Dave for many years, and I can tell you that they are top-notch. Great pricing, excellent printing quality, and the team takes pride in their work. If you are looking for a screen printer who really cares about your project, you won’t go wrong with Atkinson Graphics. Bill A. We’re committed to serving our local community. We enjoy opportunities to give back by using our skills and talents to help support local causes and make a positive difference in our community. Like us on Facebook for news on specials, sales, new products & services, and more!
from scalymongo import Document from tests.acceptance.base_acceptance_test import BaseAcceptanceTest class FindExample(Document): structure = { 'name': basestring, 'age': int, } indexes = [{ 'fields': [('name', 1)], }] __database__ = 'test' __collection__ = __file__ class BaseFindTest(BaseAcceptanceTest): @classmethod def setup_class(cls): BaseAcceptanceTest.setup_class() cls.connection.models.FindExample.collection.drop() cls.docs = [ {'name': 'Alice', 'age': 32}, {'name': 'Bob', 'age': 32}, {'name': 'Carl', 'age': 41}, {'name': 'Donna', 'age': 35}, ] cls.docs = [cls.connection.models.FindExample(doc) for doc in cls.docs] for doc in cls.docs: doc.save() cls.connection.models.FindExample.ensure_indexes() @classmethod def teardown_class(cls): super(BaseFindTest, cls).teardown_class() cls.connection.models.FindExample.collection.drop() class PropertyReturnsScalyMongoDocuments(object): def should_return_only_find_example_instances(self): for returned_doc in self.returned_docs: assert isinstance(returned_doc, FindExample) class WhenFindingByAge(BaseFindTest): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find({'age': 32}) cls.returned_docs = list(cls.returned) def should_find_alice_and_bob(self): assert self.returned_docs == self.docs[:2] def should_return_2_results(self): assert self.returned.count() == 2 class WhenFindingWithoutArgs(BaseFindTest, PropertyReturnsScalyMongoDocuments): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find() cls.returned_docs = list(cls.returned) def should_find_all(self): assert self.returned_docs == self.docs class WhenFindingWithoutArgsOnRewoundCursor(BaseFindTest): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find() cls.first_returned_docs = list(cls.returned) cls.returned = cls.returned.rewind() cls.second_returned_docs = list(cls.returned) def should_find_all(self): assert self.first_returned_docs == self.docs assert self.second_returned_docs == self.docs def should_return_find_example_instances(self): for doc in self.first_returned_docs: assert isinstance(doc, FindExample) for doc in self.second_returned_docs: assert isinstance(doc, FindExample) class WhenFindingWithoutArgsOnClonedCursor(BaseFindTest): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find() cls.first_returned_docs = list(cls.returned) cls.returned = cls.returned.clone() cls.second_returned_docs = list(cls.returned) def should_find_all(self): assert self.first_returned_docs == self.docs assert self.second_returned_docs == self.docs def should_return_find_example_instances(self): for doc in self.first_returned_docs: assert isinstance(doc, FindExample) for doc in self.second_returned_docs: assert isinstance(doc, FindExample) class WhenNoDocumentsMatch(BaseFindTest): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find( {'name': 'John'}) cls.returned_docs = list(cls.returned) def should_return_0_results(self): assert self.returned.count() == 0 class WhenFindingWithSkip(BaseFindTest, PropertyReturnsScalyMongoDocuments): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find().skip(1) cls.returned_docs = list(cls.returned) def should_return_Bob_and_Carl(self): assert self.returned_docs == self.docs[1:] class 
WhenFindingWithLimit(BaseFindTest, PropertyReturnsScalyMongoDocuments): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find().limit(1) cls.returned_docs = list(cls.returned) def should_return_only_first(self): assert self.returned_docs == [self.docs[0]] class WhenSortingByNameInverted(BaseFindTest, PropertyReturnsScalyMongoDocuments): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find().sort( [('name', -1)]) cls.returned_docs = list(cls.returned) def should_return_4_results(self): assert self.returned.count() == 4 def should_return_Donna_Carl_Bob_and_Alice(self): assert self.returned_docs[0] == self.docs[-1] assert self.returned_docs[1] == self.docs[-2] assert self.returned_docs[2] == self.docs[-3] assert self.returned_docs[3] == self.docs[-4] class WhenFilteringWithAWhereClause(BaseFindTest, PropertyReturnsScalyMongoDocuments): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find().where( 'this.age>35') cls.returned_docs = list(cls.returned) def should_return_1_result(self): assert self.returned.count() == 1 def should_return_Carl(self): assert self.returned_docs == [self.docs[2]] class WhenGettingASlice(BaseFindTest): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find()[1:2] cls.returned_docs = list(cls.returned) def should_return_Bob_and_Carl(self): assert self.returned_docs == self.docs[1:2] def should_return_1_result(self): assert self.returned.count(True) == 1 class WhenFindingAge32WithMaxScanOf1( BaseFindTest, PropertyReturnsScalyMongoDocuments): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find( {'age': 32}).max_scan(1) cls.returned_docs = list(cls.returned) def should_return_only_Alice(self): assert self.returned_docs == [self.docs[0]] class WhenFindingAllWithHint(BaseFindTest, PropertyReturnsScalyMongoDocuments): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find().hint( [('name', 1)]) cls.returned_docs = list(cls.returned) def should_find_all(self): assert self.returned_docs == self.docs class WhenFindingAllWithBatchSize(BaseFindTest, PropertyReturnsScalyMongoDocuments): @classmethod def setup_class(cls): BaseFindTest.setup_class() cls.returned = cls.connection.models.FindExample.find().batch_size(5) cls.returned_docs = list(cls.returned) def should_find_all(self): assert self.returned_docs == self.docs
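For orientation, here is a hedged usage sketch of the query API these tests exercise. The tests only show cls.connection.models.FindExample; how the connection itself is built lives in BaseAcceptanceTest (not shown), so the Connection constructor below is an assumption, as is the literal collection name.

# Hypothetical usage of the scalymongo find API shown in the tests above
# (Python 2, since the structure uses basestring). The Connection(...)
# call is assumed; the query/cursor methods mirror the tests.
from scalymongo import Connection, Document


class FindExample(Document):
    structure = {'name': basestring, 'age': int}
    indexes = [{'fields': [('name', 1)]}]
    __database__ = 'test'
    __collection__ = 'find_example'  # assumed name; tests use __file__


connection = Connection('localhost', 27017)  # assumed constructor arguments
Model = connection.models.FindExample

Model(dict(name='Alice', age=32)).save()
Model(dict(name='Bob', age=32)).save()

thirty_somethings = Model.find({'age': 32})
print(thirty_somethings.count())
for doc in thirty_somethings.sort([('name', -1)]):
    print(doc['name'])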
More than 500 Cast Members from Walt Disney World Resort recently gathered at Disney’s Coronado Springs Resort for the launch of the Women’s Inclusion Network, the latest of Disney’s diversity resource groups. The event, which highlighted leadership, empowerment, advocacy and development among women and men, encouraged Cast Members to achieve their greatest potential and create a legacy of inclusion. “The mission of this group is to elevate and highlight the powerful influence of diverse women on our culture, people and market, as champions of education, development, mentorship and sponsorship,” said Maribeth Bisienere, senior vice president, Walt Disney World Park Operations. The concept for the Women’s Inclusion Network (WIN) began in 2014 with a group of Walt Disney Imagineers who recognized a need to organize resources for the women in their division. During the event, Disney executives led a panel discussion that touched on intersectionality, success, challenges and self-worth. The group’s launch event was attended not only by women from various Disney lines of business, but also by many men.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import testscenarios
import testtools
import yaml

from heatclient.common import template_format

load_tests = testscenarios.load_tests_apply_scenarios


class YamlParseExceptions(testtools.TestCase):

    scenarios = [
        ('scanner', dict(raised_exception=yaml.scanner.ScannerError())),
        ('parser', dict(raised_exception=yaml.parser.ParserError())),
        ('reader',
         dict(raised_exception=yaml.reader.ReaderError('', '', '', '', ''))),
    ]

    def test_parse_to_value_exception(self):
        text = 'not important'

        with mock.patch.object(yaml, 'load') as yaml_loader:
            yaml_loader.side_effect = self.raised_exception
            self.assertRaises(ValueError,
                              template_format.parse, text)

    def test_parse_no_version_format(self):
        yaml = ''
        self.assertRaises(ValueError, template_format.parse, yaml)
        yaml2 = '''Parameters: {}
Mappings: {}
Resources: {}
Outputs: {}
'''
        self.assertRaises(ValueError, template_format.parse, yaml2)
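For contrast with the failure cases above, here is a minimal sketch of the happy path for template_format.parse, assuming the usual requirement that a recognized version key such as heat_template_version be present; the exact set of accepted version keys is defined inside heatclient.

# Minimal sketch: a template with a version key is expected to parse to a dict.
from heatclient.common import template_format

hot_template = '''
heat_template_version: 2013-05-23
description: A trivial template with no resources.
resources: {}
outputs: {}
'''

parsed = template_format.parse(hot_template)
print(parsed['heat_template_version'])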
By: Pastor Carlos Barcelona Jr. So the Israelites did as Joshua commanded them. They took twelve stones from the middle of the Jordan, according to the number of the tribes of the Israelites, as the Lord had told Joshua; and they carried them over with them to their camp, where they put them down. Joshua set up the twelve stones that had been in the middle of the Jordan at the spot where the priests who carried the ark of the covenant had stood. And they are there to this day. And the priests came up out of the river carrying the ark of the covenant of the Lord. No sooner had they set their feet on the dry ground than the waters of the Jordan returned to their place and ran at flood stage as before. On the tenth day of the first month the people went up from the Jordan and camped at Gilgal on the eastern border of Jericho. Rocks are not hard to find in the Holy Land. You can’t miss the rocks—they’re everywhere. There are huge stones and boulders, and smaller rocks and pebbles that fill the streams. From the Negev Desert in the south to the fertile fields of Galilee in the north, you see rocks everywhere you look. And not just rocks, but piles of stones or monuments, just like the one described in Joshua 4. Monuments are built not only to help those living at the time the events occurred to remember what happened, but also to remind future generations of what happened. The 12 stones taken out of the Jordan and put on the west bank of the river were a reminder of God’s tremendous power on Israel’s behalf. It was important for God’s people, Israel, to set up a memorial to help them never forget what God had done for them in the crossing of the Jordan River. After 40 years of wandering in the wilderness, the people of God had at long last entered the Promised Land. Just as it took a miracle to get them out of Egypt, it took a miracle to get them into Canaan. Just as God parted the Red Sea for Moses, he parted the Jordan River for Joshua. Both miracles happened so that each generation would know the Lord was with them in their time of need. The Red Sea miracle met the need of the older generation; the Jordan River miracle met the need of the younger generation. And the same God performed both miracles. Joshua 3 tells us that the Jordan River was at flood stage, which meant it was far too deep and far too swift for several million Jews and their livestock to cross over safely. So this is how it happened. Joshua told the priests to lift the Ark of the Covenant onto their shoulders and step out into the water. The moment their feet touched the swirling muddy water, the river stopped flowing and the ground beneath their feet dried up. The whole nation passed in front of the priests and the Ark, crossing on dry ground into the Promised Land. It was a mighty miracle of God—a moment to be remembered forever. But Joshua knew that even mighty miracles can be forgotten unless we do something to remember them. So he instructed 12 men—one from each tribe—to take a large stone from the middle of the riverbed where the priests were standing with the Ark of the Covenant. Each man was to take one large stone, put it on his shoulder, and carry it to the place where the Israelites would camp that night—a place called Gilgal. These 12 stones were to be arranged or piled as a memorial of how God had led His people across the Jordan River at flood time. Joshua had two things in mind when he had the people build a monument—a memorial—from those 12 stones. First, it was a reminder to Israel of what God had done for them. Second, it was a testimony to the watching world.
Verse 24 says, “He did this so that all the peoples of the earth might know that the hand of the LORD is powerful.” Those 12 stones reminded the nation of what God had done. They were visual evidence that in the moment of crisis, God had brought his people safely across the Jordan River. They testified to God’s faithfulness in the past so that future generations would know that they too could trust the Lord. When the pagans saw those stones, they would know that the God of Israel was a mighty God. The fourth chapter of Joshua grapples with the fact that a basic component of life is a hope for the future that is based on the memories of the past, memories which help bring meaning to the present. Memories are important! They are the soil of our present experiences into which our roots sink deeply and from which we receive nourishment. The superstructures of our lives are built upon the foundation of our memories. We are instructed by our memories as to the most creative way to live in the present, and they help equip us with a positive hope for the future. God through the centuries has given us signs, symbols and memorials. We need to be reminded of the importance of these and occasionally review them, for ourselves, our children and our people. God gave Noah the rainbow as a perpetual reminder of His love. He gave Abraham and the Jews circumcision as a sign of His covenant. God gave us Miriam’s poetry to commemorate the crossing of the Red Sea. He gave the Ark, holding the law and the manna, as a symbol of His continuing provision. The Temple in Jerusalem reminded all of His dwelling. God gave us Himself, in human form, in the miracle of the incarnation. The Lord Jesus gave us baptism as a symbol of His death, burial and resurrection, which free us from any condemnation. The Lord’s Supper enables us to look back to the cross and forward to the marriage supper of the Lamb as we handle the bread and the cup. There is a place for memorials. We dare not forget this, and we must keep this thought. These stones were to be an enduring sermon, directed not only to the ears but to the eyes. The pile of stones at Gilgal speaks of many memories, to the people of Israel then and to believers today. What are the lessons we can get from this passage? We have a sacred responsibility to take the truth of God and see that it is passed down to the next generation. Psalm 102:18 says, “Let this be written for a future generation, that a people not yet created may praise the LORD.” Those who are older have a special obligation to pass on the stories of what God did for them. “Even when I am old and gray, do not forsake me, O God, till I declare your power to the next generation, your might to all who are to come” (Psalm 71:18). As Joshua makes clear, parents bear the first responsibility for teaching their children—and not just parents in general, but fathers in particular. Dads, God holds you accountable for the spiritual development of your children. Your sons and daughters look to you for answers. When they ask you, “What do these stones mean?” what will you say? When they ask you about the church, salvation, giving, the Bible, communion, dedication, prayer, holiness and sin, what will you answer? The Christian movement is always only one generation from extinction. And every church is only one generation away from closing. If we do not pass along the faith to the rising generation, we have failed at our most important task.
We must tell them what God has done for us—and then we must tell them again and again until the stories are tattooed on their souls. Tell your children how God answered your prayers in times of trouble. Tell them how Jesus rescued you from a life of sin. Tell them how you saw God do amazing things—tell the stories and then tell them again. Every generation needs its own stories. The older generation had the Red Sea; the younger generation had the Jordan River. Joshua was concerned about more than his own generation. Though he was past middle age, he was looking to the future, thinking about the legacy of faith he would pass on to the next generation. Christian parents and leaders, let us not think that everything will go well with our next generation or with our children if we don’t take up our responsibilities today. We need to pass on the memory of God’s work, because when memory fails, faith falters. Kenneth Gangel writes, “Faith requires that we look forward, but it also requires that we look back.” Someone has said, “The greatest enemy of faith may be forgetfulness.” If we forget how God has worked and provided in the past, we may fail to trust him for the future. Israel’s fathers and mothers failed to pass on to their children the truth of what God had done. As a result, their children didn’t know the LORD or what He had done for Israel. What God did in causing the waters of the Jordan to stand in a heap for Israel was forgotten; as a result, the people of Israel forgot the LORD and rebelled against Him by doing what was right in their own eyes. Just one generation later, the nation was involved in materialism, idolatry, and the worst sorts of immorality. We set our children up for spiritual meltdown when we fail to teach them what God has done for us. As godly parents we must return to certain memories in order to pass them on to our children. What are they? 1. The memories of places. For us there are places every bit as spiritually significant as the pile of stones at Gilgal. We must return to the place of our dedication and commitment, our prayer closet, so that our children’s eyes will see. 2. The memories of people. They could be the Sunday School teacher, mentor, pastor, leader, or parents whom God used in your life. Remember them in front of your children. 3. The memories of experiences. Somewhere along the way we had wonderful experiences that we can tell our children about. Tell them how you called on the name of the Lord and He answered your prayer, and they too will learn to call on the name of the Lord. 4. Physical gestures. It’s not only about going to the altar, kneeling down, or raising a hand during the service; it is any gesture of commitment and love. I will never forget one person’s gesture. He shook my hand with folded money in his palm and tried to find a way for us to be restored. Even if I don’t get to see him today, I will never forget that person. His gesture reminds me to do the same for my leaders and members, so that they too can do the same for their people. 5. Mementos or souvenirs. God knows how we think. He knows that our memories are triggered by objects. Do you still keep that bookmark, or that book that touched or challenged you? To this day I still long to read Kathryn Kuhlman’s book “I Believe in Miracles,” because that book challenged my faith so much when I was in the field. There are two commands here: keep and meditate. Both keeping and meditating have to do with memory. So it actually means that when we return to the Word, we will become successful.
Success means fruitfulness: not bad fruit but good fruit, and these fruits are your children and those under your care. We should not fail to model and teach God’s Word, because it takes only one generation for degeneration to take place. It seems impossible that in a couple of generations people could forget God and what He has done for them. But isn’t that exactly what we see in many churches today? In 1992, I was sent to Libertad, Antique, as a missionary. We found out that there had actually been an AG church there, but it had closed down years before we came. We tried to find the members, but to our sadness we found only one old and cold couple. They attended a church, but it was one that had been there 30 years before we came, and the number of people going to that church kept getting smaller and smaller. Why? Many of the old members had already died, some were dying, and no new members came, not even the old members’ children. Deuteronomy 8:7-14 (NIV) says, “For the LORD your God is bringing you into a good land—a land with streams and pools of water, with springs flowing in the valleys and hills; a land with wheat and barley, vines and fig trees, pomegranates, olive oil and honey; a land where bread will not be scarce and you will lack nothing; a land where the rocks are iron and you can dig copper out of the hills. When you have eaten and are satisfied, praise the LORD your God for the good land he has given you. Be careful that you do not forget the LORD your God, failing to observe his commands, his laws and his decrees that I am giving you this day. Otherwise, when you eat and are satisfied, when you build fine houses and settle down, and when your herds and flocks grow large and your silver and gold increase and all you have is multiplied, then your heart will become proud and you will forget the LORD your God, who brought you out of Egypt, out of the land of slavery.” Does God’s warning to Israel remind you of anything similar today? What are we doing now that we are more blessed than before? Or maybe we are not that blessed yet, but what are we doing while we are on the way to the blessing? What are our children, spiritual children and potential spiritual children learning from us? Are we passing on the Word of God and the testimony of His work to them? The whole of Joshua 4 reminds all parents, guardians, pastors, leaders and every believer that we should not fail to model and teach God’s Word, because it takes only one generation for degeneration to take place. Pass on the memory of God’s work so that the next generation will not fail. Take up the sacred responsibility for the truth of God and see that it is passed down to the next generation. My brothers and sisters, let’s not be so preoccupied with religiosity or legitimacy, and let’s not get so busy trying to get God to do what we want done, that we lose sight of our responsibility to do what He wants done.
import math
import random

import pygame

from buffalo import utils
from npc import NPC


class Enemy(NPC):
    # Class for anything hostile. For now it just follows the player around.
    # There's not enough in the game in terms of health, damage, and item
    # usage to have actual combat.

    def __init__(self, name=None, fPos=None, **kwargs):
        speed = kwargs.get("speed") if kwargs.get("speed") is not None else .05
        NPC.__init__(self, name=name, fPos=fPos, speed=speed,
                     spawn=kwargs.get("spawn"))

    def update(self, target):
        # If it's close enough to the player it won't move.
        # If it's too far away it will stop trying.
        distance = math.hypot(self.fPos[1] - target[1],
                              self.fPos[0] - target[0])
        if self.fPos[0] != target[0] and 32 < distance < 600:
            # Some fancy trig to get the direction it needs to go
            # to follow the player
            angle = math.atan((self.fPos[1] - target[1]) /
                              (self.fPos[0] - target[0]))
            if self.fPos[0] - target[0] > 0:
                angle = math.pi + angle
            self.move(angle)
        else:
            self.move(None)
        NPC.update(self)
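A hedged sketch of how Enemy might be driven outside the full game loop. The NPC base class's move()/update() contract and the fPos attribute are inferred from how Enemy uses them above; the player position is faked here as a plain tuple, since the real player entity is not part of this file.

# Hypothetical usage of Enemy. In the real game the target presumably
# comes from the player entity each frame.
enemy = Enemy(name="slime", fPos=(100.0, 100.0), speed=0.05)
player_pos = (300.0, 140.0)

for _ in range(10):            # a few simulated ticks
    enemy.update(player_pos)   # moves toward the player while 32 < distance < 600
    print(enemy.fPos)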
We manufacture & supply boom lifts, manlifts, scissor lifts, crawler cranes, cherry pickers, lifting equipment, articulated boom lifts, straight boom lifts, diesel scissor lifts, electric scissor lifts, JLG 45 boom lift and JLG 60 boom lift components, etc. We also manufacture & supply car wheels, used cars, car mud flaps, hydraulic car lift equipment, etc. If you need car insurance in Gainesville, GA, you have come to the right place. Visit us today or call 770-536-3555.
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-11-28 09:45
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0003_auto_20161119_0927'),
    ]

    operations = [
        migrations.CreateModel(
            name='Checklist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ChecklistItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('to_be_checked_on_matchday', models.IntegerField(blank=True, null=True)),
                ('to_be_checked_on_matchday_pattern', models.IntegerField(blank=True, null=True)),
                ('to_be_checked_if_home_match_tomorrow', models.BooleanField(default=False)),
                ('checklist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Checklist')),
                ('last_checked_on_matchday', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Matchday')),
            ],
        ),
    ]
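For readability, here is a hypothetical models.py equivalent of the migration above. Field names and options come from the CreateModel operations; the module location and the core.Matchday model are assumptions.

# Hypothetical reconstruction of the models created by this migration.
from django.conf import settings
from django.db import models


class Checklist(models.Model):
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)


class ChecklistItem(models.Model):
    name = models.CharField(max_length=255)
    to_be_checked_on_matchday = models.IntegerField(blank=True, null=True)
    to_be_checked_on_matchday_pattern = models.IntegerField(blank=True, null=True)
    to_be_checked_if_home_match_tomorrow = models.BooleanField(default=False)
    checklist = models.ForeignKey(Checklist, on_delete=models.CASCADE)
    last_checked_on_matchday = models.ForeignKey(
        'core.Matchday', blank=True, default=None, null=True,
        on_delete=models.CASCADE)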
Is Technology Amplifying Human Potential, or Amusing Ourselves to Death? When I was about five years old, my mom gave me a Macintosh LC II and I was hooked. Not to Facebook or the Internet; they didn’t exist yet. I was hooked on creating things – painting things, scripting interactive games in HyperCard, programming little tools or games. Like the technology visionaries of the 1970s and 80s, such as Doug Engelbart, Alan Kay and Steve Jobs, I optimistically believed computers could be “bicycles for our minds” and amplify human potential. And they did empower us. But today, in the year 2015, “empowerment” rarely feels like my day-to-day experience with technology. Instead I feel constantly lured into distractions. I get sucked endlessly into email and distracting websites. I get bulldozed by interruptive text messages and back-and-forth scheduling, or find myself scrolling a website in a trance at 1am. It’s scary how true this feels today. Huxley was concerned about what’s irresistible to our instincts. Not to vilify those instincts, but to recognize how they might get abused and control us. Just as we have built-in gustatory instincts for salt, sugar and fat that were incredibly useful for survival on the savannah but are abused by our modern food environment, Huxley knew we have built-in instincts for novelty, curiosity gaps, social approval and fear of missing something important. These instincts were useful to have on the savannah, but our media environment adversarially exploits them to keep us glued to screens. So why is our experience of the Internet and computing going this way? Towards distraction, and away from empowerment? It’s because we live in an attention economy. The attention economy means that no matter what a technology company aims to create – an informative news site like the New York Times, a meditation app to help people meditate, a social network, or an addictive game – it wins by getting people to spend time. What starts as an honest competition to make useful things that people spend time on devolves into a race to the bottom of the brain stem to maximize the time we spend. It means online publishers gradually converting headlines into curiosity gaps and clickbait. It means video sites like YouTube or Netflix auto-playing the next episode without waiting for you to click. It means businesses sending push notifications and emails to make you feel you might miss something important if you don’t check. And we’re vulnerable to these mechanisms; knowing their tricks doesn’t inoculate us against their efficacy. The problem is, you can’t ask any business in this competition not to use these tricks while its competitors are using them. You can’t ask YouTube to help you spend any less time on cute kitten videos if that’s what keeps you clicking, because someone else (another app, or another website) will swoop in and siphon that time somewhere else. Stock prices depend on keeping engagement numbers high, and it’s only going to get worse as businesses compete. We’re not going to get out of this situation until we change the thing these companies compete for, from the currency of “Time Spent” to something else. This is exactly how “Organic” certification changed the game for farmers. By defining and standardizing what counts as “safe” cultivation practices (no pesticides), the farmers who wanted to do what’s “good for us” no longer got undercut by farmers who used unsafe pesticides to achieve lower prices.
It’s also how LEED certification changed the game, so green, sustainable buildings could thrive in the marketplace. We need something like that for software, where businesses compete for “Time Well Spent”: a certification and a consumer rating that captures how their users, when shown a reflection of their use, later rate their experience as “time well spent” versus one they partially regret. Like Yelp reviews, but for experiences. Imagine a world where “Time Well Spent” determines your stock price, your popularity in app stores, your ranking in news feeds, your ability to attract talented employees, your media attention, and your funding possibilities. It’s a B-Corp movement for technology. I used to believe we could just ask software designers to take on moral responsibility for how they shape the billions of minutes and hours of other people’s lives. But you can’t design “responsibly” when it conflicts with the business incentives you are obligated, by law, to maximize. This is a long road, but we can get there by starting a new conversation. Instead of having the old conversation about self-control and waiting for cultural norms to adapt on their own, draw your friends and family into a new conversation. Let’s set incentives to create a world where the Internet and our devices amplify human potential again, and where we can trust-fall into the whirlpool of technology knowing that it is on our team, there to help us spend our time, and our lives, well.
# -*- coding: utf-8 -*- """ Features ------ Extra features Elements. """ from jinja2 import Template import json from .utilities import (color_brewer, _parse_size, legend_scaler, _locations_mirror, _locations_tolist, image_to_url) from .element import Element, Figure, JavascriptLink, CssLink, MacroElement from .map import TileLayer, Icon class WmsTileLayer(TileLayer): def __init__(self, url, name=None, format=None, layers=None, transparent=True, attribution=None): """TODO docstring here Parameters ---------- """ super(TileLayer, self).__init__() self._name = 'WmsTileLayer' self.tile_name = name if name is not None else 'WmsTileLayer_'+self._id self.url = url self.format = format self.layers = layers self.transparent = transparent # if attribution is None: # raise ValueError('WMS must' # ' also be passed an attribution') self.attribution = attribution self._template = Template(u""" {% macro script(this, kwargs) %} var {{this.get_name()}} = L.tileLayer.wms( '{{ this.url }}', { format:'{{ this.format }}', transparent: {{ this.transparent.__str__().lower() }}, layers:'{{ this.layers }}' {% if this.attribution %}, attribution:'{{this.attribution}}'{% endif %} } ).addTo({{this._parent.get_name()}}); {% endmacro %} """) class RegularPolygonMarker(MacroElement): def __init__(self, location, popup=None, color='black', opacity=1, weight=2, fill_color='blue', fill_opacity=1, number_of_sides=4, rotation=0, radius=15): """TODO : docstring here""" super(RegularPolygonMarker, self).__init__() self._name = 'RegularPolygonMarker' self.location = location self.color = color self.opacity = opacity self.weight = weight self.fill_color = fill_color self.fill_opacity = fill_opacity self.number_of_sides = number_of_sides self.rotation = rotation self.radius = radius if popup is not None: self.add_children(popup) self._template = Template(u""" {% macro script(this, kwargs) %} var {{this.get_name()}} = new L.RegularPolygonMarker( new L.LatLng({{this.location[0]}},{{this.location[1]}}), { icon : new L.Icon.Default(), color: '{{this.color}}', opacity: {{this.opacity}}, weight: {{this.weight}}, fillColor: '{{this.fill_color}}', fillOpacity: {{this.fill_opacity}}, numberOfSides: {{this.number_of_sides}}, rotation: {{this.rotation}}, radius: {{this.radius}} } ) .addTo({{this._parent.get_name()}}); {% endmacro %} """) def render(self, **kwargs): super(RegularPolygonMarker, self).render() figure = self.get_root() assert isinstance(figure, Figure), ("You cannot render this Element " "if it's not in a Figure.") figure.header.add_children( JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet-dvf/0.2/leaflet-dvf.markers.min.js"), name='dvf_js') class Vega(Element): def __init__(self, data, width='100%', height='100%', left="0%", top="0%", position='relative'): """TODO : docstring here""" super(Vega, self).__init__() self._name = 'Vega' self.data = data # Size Parameters. 
self.width = _parse_size(width) self.height = _parse_size(height) self.left = _parse_size(left) self.top = _parse_size(top) self.position = position self._template = Template(u"") def render(self, **kwargs): self.json = json.dumps(self.data) self._parent.html.add_children(Element(Template(""" <div id="{{this.get_name()}}"></div> """).render(this=self, kwargs=kwargs)), name=self.get_name()) self._parent.script.add_children(Element(Template(""" vega_parse({{this.json}},{{this.get_name()}}); """).render(this=self)), name=self.get_name()) figure = self.get_root() assert isinstance(figure, Figure), ("You cannot render this Element " "if it's not in a Figure.") figure.header.add_children(Element(Template(""" <style> #{{this.get_name()}} { position : {{this.position}}; width : {{this.width[0]}}{{this.width[1]}}; height: {{this.height[0]}}{{this.height[1]}}; left: {{this.left[0]}}{{this.left[1]}}; top: {{this.top[0]}}{{this.top[1]}}; </style> """).render(this=self, **kwargs)), name=self.get_name()) figure.header.add_children( JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"), name='d3') figure.header.add_children( JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/vega/1.4.3/vega.min.js"), name='vega') figure.header.add_children( JavascriptLink("https://code.jquery.com/jquery-2.1.0.min.js"), name='jquery') figure.script.add_children( Template("""function vega_parse(spec, div) { vg.parse.spec(spec, function(chart) { chart({el:div}).update(); });}"""), name='vega_parse') class GeoJson(MacroElement): def __init__(self, data): """Creates a GeoJson plugin to append into a map with Map.add_plugin. Parameters ---------- data: file, dict or str. The geo-json data you want to plot. If file, then data will be read in the file and fully embedded in Leaflet's javascript. If dict, then data will be converted to JSON and embedded in the javascript. If str, then data will be passed to the javascript as-is. examples : # providing file GeoJson(open('foo.json')) # providing dict GeoJson(json.load(open('foo.json'))) # providing string GeoJson(open('foo.json').read()) """ super(GeoJson, self).__init__() self._name = 'GeoJson' if 'read' in dir(data): self.data = data.read() elif type(data) is dict: self.data = json.dumps(data) else: self.data = data self._template = Template(u""" {% macro script(this, kwargs) %} var {{this.get_name()}} = L.geoJson({{this.data}}).addTo({{this._parent.get_name()}}); {% endmacro %} """) class TopoJson(MacroElement): def __init__(self, data, object_path): """TODO docstring here. 
""" super(TopoJson, self).__init__() self._name = 'TopoJson' if 'read' in dir(data): self.data = data.read() elif type(data) is dict: self.data = json.dumps(data) else: self.data = data self.object_path = object_path self._template = Template(u""" {% macro script(this, kwargs) %} var {{this.get_name()}}_data = {{this.data}}; var {{this.get_name()}} = L.geoJson(topojson.feature( {{this.get_name()}}_data, {{this.get_name()}}_data.{{this.object_path}} )).addTo({{this._parent.get_name()}}); {% endmacro %} """) def render(self, **kwargs): super(TopoJson, self).render(**kwargs) figure = self.get_root() assert isinstance(figure, Figure), ("You cannot render this Element " "if it's not in a Figure.") figure.header.add_children( JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/topojson/1.6.9/topojson.min.js"), name='topojson') class GeoJsonStyle(MacroElement): def __init__(self, color_domain, color_code, color_data=None, key_on='feature.properties.color', weight=1, opacity=1, color='black', fill_opacity=0.6, dash_array=0): """TODO : docstring here. """ super(GeoJsonStyle, self).__init__() self._name = 'GeoJsonStyle' self.color_domain = color_domain self.color_range = color_brewer(color_code, n=len(color_domain)) self.color_data = json.dumps(color_data) self.key_on = key_on self.weight = weight self.opacity = opacity self.color = color self.fill_color = color_code self.fill_opacity = fill_opacity self.dash_array = dash_array self._template = Template(u""" {% macro script(this, kwargs) %} {% if not this.color_range %} var {{this.get_name()}} = { color_function : function(feature) { return '{{this.fill_color}}'; }, }; {%else%} var {{this.get_name()}} = { color_scale : d3.scale.threshold() .domain({{this.color_domain}}) .range({{this.color_range}}), color_data : {{this.color_data}}, color_function : function(feature) { {% if this.color_data=='null' %} return this.color_scale({{this.key_on}}); {% else %} return this.color_scale(this.color_data[{{this.key_on}}]); {% endif %} }, }; {%endif%} {{this._parent.get_name()}}.setStyle(function(feature) { return { fillColor: {{this.get_name()}}.color_function(feature), weight: {{this.weight}}, opacity: {{this.opacity}}, color: '{{this.color}}', fillOpacity: {{this.fill_opacity}}, dashArray: '{{this.dash_array}}' }; }); {% endmacro %} """) def render(self, **kwargs): super(GeoJsonStyle, self).render(**kwargs) figure = self.get_root() assert isinstance(figure, Figure), ("You cannot render this Element " "if it's not in a Figure.") figure.header.add_children( JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"), name='d3') class ColorScale(MacroElement): def __init__(self, color_domain, color_code, caption=""): """TODO : docstring here. """ super(ColorScale, self).__init__() self._name = 'ColorScale' self.color_domain = color_domain self.color_range = color_brewer(color_code, n=len(color_domain)) self.tick_labels = legend_scaler(self.color_domain) self.caption = caption self.fill_color = color_code self._template = self._env.get_template('color_scale.js') def render(self, **kwargs): super(ColorScale, self).render(**kwargs) figure = self.get_root() assert isinstance(figure, Figure), ("You cannot render this Element " "if it's not in a Figure.") figure.header.add_children( JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"), name='d3') class MarkerCluster(MacroElement): """Adds a MarkerCluster layer on the map.""" def __init__(self): """Creates a MarkerCluster element to append into a map with Map.add_children. 
Parameters ---------- """ super(MarkerCluster, self).__init__() self._name = 'MarkerCluster' self._template = Template(u""" {% macro script(this, kwargs) %} var {{this.get_name()}} = L.markerClusterGroup(); {{this._parent.get_name()}}.addLayer({{this.get_name()}}); {% endmacro %} """) def render(self, **kwargs): super(MarkerCluster, self).render() figure = self.get_root() assert isinstance(figure, Figure), ("You cannot render this Element " "if it's not in a Figure.") figure.header.add_children( JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/leaflet.markercluster-src.js"), name='marker_cluster_src') figure.header.add_children( JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/leaflet.markercluster.js"), name='marker_cluster') figure.header.add_children( CssLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/MarkerCluster.css"), name='marker_cluster_css') figure.header.add_children( CssLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/MarkerCluster.Default.css"), name="marker_cluster_default_css") class DivIcon(MacroElement): def __init__(self, width=30, height=30): """TODO : docstring here""" super(DivIcon, self).__init__() self._name = 'DivIcon' self.width = width self.height = height self._template = Template(u""" {% macro script(this, kwargs) %} var {{this.get_name()}} = L.divIcon({ className: 'leaflet-div-icon', 'iconSize': [{{ this.width }},{{ this.height }}] }); {{this._parent.get_name()}}.setIcon({{this.get_name()}}); {% endmacro %} """) class CircleMarker(MacroElement): def __init__(self, location, radius=500, color='black', fill_color='black', fill_opacity=0.6, popup=None): """TODO : docstring here """ super(CircleMarker, self).__init__() self._name = 'CircleMarker' self.location = location self.radius = radius self.color = color self.fill_color = fill_color self.fill_opacity = fill_opacity if popup is not None: self.add_children(popup) self._template = Template(u""" {% macro script(this, kwargs) %} var {{this.get_name()}} = L.circle( [{{this.location[0]}},{{this.location[1]}}], {{ this.radius }}, { color: '{{ this.color }}', fillColor: '{{ this.fill_color }}', fillOpacity: {{ this.fill_opacity }} } ) .addTo({{this._parent.get_name()}}); {% endmacro %} """) class LatLngPopup(MacroElement): def __init__(self): """TODO : docstring here """ super(LatLngPopup, self).__init__() self._name = 'LatLngPopup' self._template = Template(u""" {% macro script(this, kwargs) %} var {{this.get_name()}} = L.popup(); function latLngPop(e) { {{this.get_name()}} .setLatLng(e.latlng) .setContent("Latitude: " + e.latlng.lat.toFixed(4) + "<br>Longitude: " + e.latlng.lng.toFixed(4)) .openOn({{this._parent.get_name()}}); } {{this._parent.get_name()}}.on('click', latLngPop); {% endmacro %} """) class ClickForMarker(MacroElement): def __init__(self, popup=None): """TODO : docstring here """ super(ClickForMarker, self).__init__() self._name = 'ClickForMarker' if popup: self.popup = ''.join(['"', popup, '"']) else: self.popup = '"Latitude: " + lat + "<br>Longitude: " + lng ' self._template = Template(u""" {% macro script(this, kwargs) %} function newMarker(e){ var new_mark = L.marker().setLatLng(e.latlng).addTo({{this._parent.get_name()}}); new_mark.dragging.enable(); new_mark.on('dblclick', function(e){ {{this._parent.get_name()}}.removeLayer(e.target)}) var lat = e.latlng.lat.toFixed(4), lng = e.latlng.lng.toFixed(4); new_mark.bindPopup({{ this.popup }}); }; {{this._parent.get_name()}}.on('click', 
newMarker); {% endmacro %} """) class PolyLine(MacroElement): def __init__(self, locations, color=None, weight=None, opacity=None, latlon=True): """Creates a PolyLine object to append into a map with Map.add_children. Parameters ---------- locations: list of points (latitude, longitude) Latitude and Longitude of line (Northing, Easting) color: string, default Leaflet's default ('#03f') weight: float, default Leaflet's default (5) opacity: float, default Leaflet's default (0.5) latlon: bool, default True Whether locations are given in the form [[lat, lon]] or not ([[lon, lat]] if False). Note that the default GeoJson format is latlon=False, while Leaflet polyline's default is latlon=True. """ super(PolyLine, self).__init__() self._name = 'PolyLine' self.data = (_locations_mirror(locations) if not latlon else _locations_tolist(locations)) self.color = color self.weight = weight self.opacity = opacity self._template = Template(u""" {% macro script(this, kwargs) %} var {{this.get_name()}} = L.polyline( {{this.data}}, { {% if this.color != None %}color: '{{ this.color }}',{% endif %} {% if this.weight != None %}weight: {{ this.weight }},{% endif %} {% if this.opacity != None %}opacity: {{ this.opacity }},{% endif %} }); {{this._parent.get_name()}}.addLayer({{this.get_name()}}); {% endmacro %} """) class MultiPolyLine(MacroElement): def __init__(self, locations, color=None, weight=None, opacity=None, latlon=True): """Creates a MultiPolyLine object to append into a map with Map.add_children. Parameters ---------- locations: list of points (latitude, longitude) Latitude and Longitude of line (Northing, Easting) color: string, default Leaflet's default ('#03f') weight: float, default Leaflet's default (5) opacity: float, default Leaflet's default (0.5) latlon: bool, default True Whether locations are given in the form [[lat, lon]] or not ([[lon, lat]] if False). Note that the default GeoJson format is latlon=False, while Leaflet polyline's default is latlon=True. """ super(MultiPolyLine, self).__init__() self._name = 'MultiPolyLine' self.data = (_locations_mirror(locations) if not latlon else _locations_tolist(locations)) self.color = color self.weight = weight self.opacity = opacity self._template = Template(u""" {% macro script(this, kwargs) %} var {{this.get_name()}} = L.multiPolyline( {{this.data}}, { {% if this.color != None %}color: '{{ this.color }}',{% endif %} {% if this.weight != None %}weight: {{ this.weight }},{% endif %} {% if this.opacity != None %}opacity: {{ this.opacity }},{% endif %} }); {{this._parent.get_name()}}.addLayer({{this.get_name()}}); {% endmacro %} """) class ImageOverlay(MacroElement): def __init__(self, image, bounds, opacity=1., attribution=None, origin='upper', colormap=None, mercator_project=False): """Used to load and display a single image over specific bounds of the map, implements ILayer interface. Parameters ---------- image: string, file or array-like object The data you want to draw on the map. * If string, it will be written directly in the output file. * If file, it's content will be converted as embeded in the output file. * If array-like, it will be converted to PNG base64 string and embedded in the output. bounds: list Image bounds on the map in the form [[lat_min, lon_min], [lat_max, lon_max]] opacity: float, default Leaflet's default (1.0) attr: string, default Leaflet's default ("") origin : ['upper' | 'lower'], optional, default 'upper' Place the [0,0] index of the array in the upper left or lower left corner of the axes. 
colormap : callable, used only for `mono` image. Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)] for transforming a mono image into RGB. It must output iterables of length 3 or 4, with values between 0. and 1. Hint : you can use colormaps from `matplotlib.cm`. mercator_project : bool, default False, used only for array-like image. Transforms the data to project (longitude, latitude) coordinates to the Mercator projection. """ super(ImageOverlay, self).__init__() self._name = 'ImageOverlay' self.url = image_to_url(image, origin=origin, mercator_project=mercator_project, bounds=bounds) self.bounds = json.loads(json.dumps(bounds)) options = { 'opacity': opacity, 'attribution': attribution, } self.options = json.dumps({key: val for key, val in options.items() if val}, sort_keys=True) self._template = Template(u""" {% macro script(this, kwargs) %} var {{this.get_name()}} = L.imageOverlay( '{{ this.url }}', {{ this.bounds }}, {{ this.options }} ).addTo({{this._parent.get_name()}}); {% endmacro %} """) class CustomIcon(Icon): def __init__(self, icon_image, icon_size=None, icon_anchor=None, shadow_image=None, shadow_size=None, shadow_anchor=None, popup_anchor=None): """Create a custom icon, based on an image. Parameters ---------- icon_image : string, file or array-like object The data you want to use as an icon. * If string, it will be written directly in the output file. * If file, it's content will be converted as embedded in the output file. * If array-like, it will be converted to PNG base64 string and embedded in the output. icon_size : tuple of 2 int Size of the icon image in pixels. icon_anchor : tuple of 2 int The coordinates of the "tip" of the icon (relative to its top left corner). The icon will be aligned so that this point is at the marker's geographical location. shadow_image : string, file or array-like object The data for the shadow image. If not specified, no shadow image will be created. shadow_size : tuple of 2 int Size of the shadow image in pixels. shadow_anchor : tuple of 2 int The coordinates of the "tip" of the shadow relative to its top left corner (the same as icon_anchor if not specified). popup_anchor : tuple of 2 int The coordinates of the point from which popups will "open", relative to the icon anchor. """ super(Icon, self).__init__() self._name = 'CustomIcon' self.icon_url = image_to_url(icon_image) self.icon_size = icon_size self.icon_anchor = icon_anchor self.shadow_url = (image_to_url(shadow_image) if shadow_image is not None else None) self.shadow_size = shadow_size self.shadow_anchor = shadow_anchor self.popup_anchor = popup_anchor self._template = Template(u""" {% macro script(this, kwargs) %} var {{this.get_name()}} = L.icon({ iconUrl: '{{this.icon_url}}', {% if this.icon_size %}iconSize: [{{this.icon_size[0]}},{{this.icon_size[1]}}],{% endif %} {% if this.icon_anchor %}iconAnchor: [{{this.icon_anchor[0]}},{{this.icon_anchor[1]}}],{% endif %} {% if this.shadow_url %}shadowUrl: '{{this.shadow_url}}',{% endif %} {% if this.shadow_size %}shadowSize: [{{this.shadow_size[0]}},{{this.shadow_size[1]}}],{% endif %} {% if this.shadow_anchor %}shadowAnchor: [{{this.shadow_anchor[0]}},{{this.shadow_anchor[1]}}],{% endif %} {% if this.popup_anchor %}popupAnchor: [{{this.popup_anchor[0]}},{{this.popup_anchor[1]}}],{% endif %} }); {{this._parent.get_name()}}.setIcon({{this.get_name()}}); {% endmacro %} """)
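The element classes above are meant to be attached to a map object rather than used on their own. Below is a minimal usage sketch under stated assumptions: it assumes these classes are importable alongside the folium Map and Marker elements of the same era (the add_children API used throughout this module) and that the assembled map can be written out with save(); the coordinates, colors and output file name are placeholders.

import folium

m = folium.Map(location=[45.5236, -122.6750], zoom_start=13)

# Cluster a few markers together; the markers are added as children of the
# cluster, so their JavaScript attaches to the cluster group.
cluster = MarkerCluster()
m.add_children(cluster)
for lat, lon in [(45.52, -122.68), (45.53, -122.66), (45.51, -122.67)]:
    cluster.add_children(folium.Marker(location=[lat, lon]))

# A circle marker and a simple two-point polyline.
m.add_children(CircleMarker(location=[45.5215, -122.675], radius=200,
                            color='#3186cc', fill_color='#3186cc'))
m.add_children(PolyLine([[45.52, -122.68], [45.53, -122.66]],
                        color='red', weight=2, opacity=0.8))

# Interactive helpers: click to read coordinates, or to drop a draggable
# marker that can be removed again with a double click.
m.add_children(LatLngPopup())
m.add_children(ClickForMarker(popup='New waypoint'))

m.save('example_map.html')  # placeholder output path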
Dear Brothers, good morning! In light of the scourge of sexual abuse perpetrated by ecclesiastics to the great harm of minors, I wanted to consult you, Patriarchs, Cardinals, Archbishops, Bishops, and Religious Superiors and Leaders, so that together we might listen to the Holy Spirit and, in docility to his guidance, hear the cry of the little ones who plead for justice. In this meeting, we sense the weight of the pastoral and ecclesial responsibility that obliges us to discuss together, in a synodal, frank and in-depth manner, how to confront this evil afflicting the Church and humanity. The holy People of God look to us, and expect from us not simple and predictable condemnations, but concrete and effective measures to be undertaken. We need to be concrete. So we begin this process armed with faith and a spirit of great parrhesia, courage and concreteness. As a help, I would share with you some important criteria formulated by the various Episcopal Commissions and Conferences – they came from you and I have organized them somewhat. They are guidelines to assist in our reflection, and they will now be distributed to you. They are a simple point of departure that came from you and now return to you. They are not meant to detract from the creativity needed in this meeting. In your name, I would also like to thank the Pontifical Commission for the Protection of Minors, the Congregation for the Doctrine of the Faith and the members of the Organizing Committee for their outstanding and dedicated work in preparing for this meeting. Many thanks! Finally, I ask the Holy Spirit to sustain us throughout these days, and to help us to turn this evil into an opportunity for awareness and purification. May the Virgin Mary enlighten us as we seek to heal the grave wounds that the scandal of paedophilia has caused, both in the little ones and in believers. Thank you. 21. Where it has not yet been in place, establish a group easily accessible for victims who want to report any crimes. Such an organization should have a certain autonomy with respect to the local ecclesiastical authority and include expert persons (clerics and laity) who know how to express the Church’s attention to those who have been offended by improper attitudes on the part of clerics. *** On this point, Abp. Scicluna noted in the afternoon press briefing that universal Canon law now has the minimum age for marriage for girls at 14 and for boys at 16. He said the Pope wishes the age to be uniformly 16 for both boys and girls, adding that national Episcopal conferences have had the power to change the minimum age, given circumstances and the cultures in their countries.
# # -*- coding: utf-8 -*- # Copyright 2019 Red Hat # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)#!/usr/bin/python """ The nxos l3_interfaces fact class It is in this file the configuration is collected from the device for a given resource, parsed, and the facts tree is populated based on the configuration. """ from __future__ import absolute_import, division, print_function __metaclass__ = type import re from copy import deepcopy from ansible.module_utils.network.common import utils from ansible.module_utils.network.nxos.argspec.l3_interfaces.l3_interfaces import L3_interfacesArgs from ansible.module_utils.network.nxos.utils.utils import get_interface_type, validate_ipv4_addr, validate_ipv6_addr class L3_interfacesFacts(object): """ The nxos l3_interfaces fact class """ def __init__(self, module, subspec='config', options='options'): self._module = module self.argument_spec = L3_interfacesArgs.argument_spec spec = deepcopy(self.argument_spec) if subspec: if options: facts_argument_spec = spec[subspec][options] else: facts_argument_spec = spec[subspec] else: facts_argument_spec = spec self.generated_spec = utils.generate_dict(facts_argument_spec) def populate_facts(self, connection, ansible_facts, data=None): """ Populate the facts for l3_interfaces :param connection: the device connection :param data: previously collected conf :rtype: dictionary :returns: facts """ objs = [] if not data: data = connection.get('show running-config | section ^interface') config = data.split('interface ') for conf in config: conf = conf.strip() if conf: obj = self.render_config(self.generated_spec, conf) if obj and len(obj.keys()) > 1: objs.append(obj) ansible_facts['ansible_network_resources'].pop('l3_interfaces', None) facts = {} if objs: facts['l3_interfaces'] = [] params = utils.validate_config(self.argument_spec, {'config': objs}) for cfg in params['config']: facts['l3_interfaces'].append(utils.remove_empties(cfg)) ansible_facts['ansible_network_resources'].update(facts) return ansible_facts def render_config(self, spec, conf): """ Render config as dictionary structure and delete keys from spec for null values :param spec: The facts tree, generated from the argspec :param conf: The configuration :rtype: dictionary :returns: The generated config """ config = deepcopy(spec) match = re.search(r'^(\S+)', conf) intf = match.group(1) if get_interface_type(intf) == 'unknown': return {} config['name'] = intf ipv4_match = re.compile(r'\n ip address (.*)') matches = ipv4_match.findall(conf) if matches: if validate_ipv4_addr(matches[0]): config['ipv4'] = [] for m in matches: ipv4_conf = m.split() addr = ipv4_conf[0] ipv4_addr = addr if validate_ipv4_addr(addr) else None if ipv4_addr: config_dict = {'address': ipv4_addr} if len(ipv4_conf) > 1: d = ipv4_conf[1] if d == 'secondary': config_dict.update({'secondary': True}) if len(ipv4_conf) == 4: if ipv4_conf[2] == 'tag': config_dict.update({'tag': int(ipv4_conf[-1])}) elif d == 'tag': config_dict.update({'tag': int(ipv4_conf[-1])}) config['ipv4'].append(config_dict) ipv6_match = re.compile(r'\n ipv6 address (.*)') matches = ipv6_match.findall(conf) if matches: if validate_ipv6_addr(matches[0]): config['ipv6'] = [] for m in matches: ipv6_conf = m.split() addr = ipv6_conf[0] ipv6_addr = addr if validate_ipv6_addr(addr) else None if ipv6_addr: config_dict = {'address': ipv6_addr} if len(ipv6_conf) > 1: d = ipv6_conf[1] if d == 'tag': config_dict.update({'tag': int(ipv6_conf[-1])}) config['ipv6'].append(config_dict) return 
utils.remove_empties(config)
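To make the fact gathering above concrete, the following hand-written illustration (not captured from a real device) shows the kind of interface section that populate_facts() splits on and the structure render_config() would build from it, assuming the single-space indentation expected by the regular expressions above and address validators that accept prefixed addresses.

# Illustrative sample only: a section of 'show running-config' output and
# the corresponding entry that would land in ansible_network_resources
# under 'l3_interfaces'.
sample_section = (
    "interface Ethernet1/1\n"
    " ip address 192.168.10.1/24 tag 5\n"
    " ip address 10.0.0.1/24 secondary\n"
    " ipv6 address 2001:db8::1/64 tag 6\n"
)

expected_fact = {
    'name': 'Ethernet1/1',
    'ipv4': [
        {'address': '192.168.10.1/24', 'tag': 5},
        {'address': '10.0.0.1/24', 'secondary': True},
    ],
    'ipv6': [
        {'address': '2001:db8::1/64', 'tag': 6},
    ],
}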
Our CENTERVILLE middle school tutors are top college students and graduates from local area universities. They have an average GPA of 3.5 or higher in their areas of tutoring specialization. At Frog Tutoring, we work with students at all grade levels, and our CENTERVILLE private middle school tutors provide customized one-on-one in-home tutoring through our proven three-step approach to academic success. Your CENTERVILLE area middle school tutor will also track student progress through detailed session reports, which will be available to you at the end of each tutoring session. If it is okay with you, your tutor will contact your child's teacher (for K-12) to get a more detailed understanding of what your child is struggling with and to make sure that tutor and teacher are on the same page in their approach to tackling the problem. Browse our list of qualified middle school tutors below. If you are in need of a middle school tutor in CENTERVILLE, please call us or simply go to the tab above and Request a Tutor, and let us help provide the understanding and assistance needed for success.
import random

# Some of my assumptions:
# - Decks contain four suits with associated colors
# - Suits contain thirteen title-like designators
# - It doesn't matter if the deck's contents can change in any which way
#   as long as the initial deck contains the correct 52 cards.
# - The random library is sufficient for randomizing things, particularly
#   when used in the way it is used here.
# - The deck should be shuffled upon creation.
# - deck_of_cards.py will be imported to whatever application users desire
#   to use a deck of cards in; the module has no interface but the API.


class Deck:
    def __init__(self):
        self.all_cards = []
        for each_card_index in range(0, 52):
            self.all_cards.append(Card(card_index=each_card_index))
        self.shuffle()

    def shuffle(self):
        new_list = []
        for each_card_index in range(0, 52):
            number_of_cards_remaining = (52 - each_card_index)
            # Unlike range(), random.randint() is inclusive at both ends,
            # so randint(1, n) - 1 yields an index from 0 to n - 1 for the
            # n cards still in the deck. (randint(0, n) - 1 would sometimes
            # return -1, which pops the last card twice as often as the rest.)
            which_card_index_to_take = (
                random.randint(1, number_of_cards_remaining) - 1)
            new_list.append(self.all_cards.pop(which_card_index_to_take))
        self.all_cards = new_list

    def deal_a_card(self):
        if len(self.all_cards) >= 1:
            return self.all_cards.pop()

    # Makes viewing the state of the deck at a glance easier.
    def __str__(self):
        # Prints the full name of each Card in the Deck.
        string_list = []
        for each_card in self.all_cards:
            string_list.append(each_card.proper_name)
        print_string = "\n".join(string_list)
        return print_string

    def list(self):
        return [each_card.proper_name for each_card in self.all_cards]


class Card:
    def __init__(self, card_index=None):
        # Support for making random Cards without a Deck:
        if card_index is None:
            card_index = random.randint(0, 51)
        # Separate from value; this is its absolute position relative to
        # the ideal deck, pre-shuffle.
        # It's logically equivalent to a specific pair of suit and value,
        # such as the Ace of Spades or the 3 of Hearts.
        self.card_index = card_index
        suits = {0: "Club", 1: "Spade", 2: "Heart", 3: "Diamond"}
        specials = {1: "Ace", 11: "Jack", 12: "Queen", 13: "King"}
        self.suit = suits[(card_index % 4)]
        if self.suit == "Club" or self.suit == "Spade":
            self.color = "black"
        else:
            self.color = "red"
        # +1 because value is 1 through 13; useful for various card games
        self.value = ((card_index % 13) + 1)
        if self.value in specials:
            self.title = specials[self.value]
        else:
            self.title = str(self.value)
        self.proper_name = str(self.title + " of " + self.suit + "s")

    # Pretty print()ing.
    def __str__(self):
        return self.proper_name
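A short usage sketch of the Deck and Card classes defined above; the output noted in the comments is only illustrative, since the deck is shuffled randomly.

if __name__ == "__main__":
    deck = Deck()                     # shuffled on creation
    hand = [deck.deal_a_card() for _ in range(5)]
    for card in hand:
        print(card)                   # e.g. "Queen of Hearts"
    print(len(deck.all_cards))        # 47 cards remain
    lone_card = Card()                # a random card, independent of any deck
    print(lone_card.suit, lone_card.value, lone_card.color)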
Watch video interview here. Joseph Mbura, a chemistry and math teacher at the W.B. Saul High School for Agricultural Sciences in the Roxborough section of Philadelphia, has loved gardening since his boyhood days in the Kenyan countryside, where crops were grown both for food and as a source of income. He started a small organic garden on a section of the school's spacious grounds to show his students that they, too, can grow their own food. He raises a traditional black bean plant for its succulent leaves, which are then steamed and mixed with onions, tomatoes and spices in a beef or chicken stew. In another plot he has just cleared he will grow "chinsaga," another green. By Mbura's side is his daughter Daisy, who has her own garden at home, and, cradled in his arm, his younger daughter, Lily, who he hopes will follow the growing tradition.

Watch video here. Ellie Goudie-Averill of Stone Depot Dance Lab performs structured and improvisational dances amidst art installations in the warehouse space of Pieri Creations in the Frankford section of Philadelphia. She talks about how a poetry workshop by Philly poet C.A. Conrad inspired her to write dance instructions for her "Serpent" piece, her part of the bill of "Make it, Break it, Rebuild it," a series of dance performances for the Philadelphia Live Arts/Fringe Festival. Averill narrates the first of her two dances with a large, branch-braided, shield-like object, talking about how it's hard to go to sleep with the television on and how some 60 percent of people aged 20 to 37 get their news primarily from Jon Stewart.

"Experimental movement artist Zornitsa Stoyanova. Often times theatrical, her collaborations explore non linear stories, movement invention, spoken word, and object installation." From the Philadelphia Live Arts Fringe Festival 2012 program description. When we walked into this performance, Stoyanova, dressed in large inflated, translucent plastic bags, including one over her head, was making fine and deliberate movements, sometimes just with her hands, which drew our focus as we tried to find meaning or purpose in their transformation.

Watch video here. York, Pa. County Fair - Fun Times! The "Procrastinators" are a troupe of three drummers out of San Diego who wander the fairgrounds and stage impromptu performances using common objects like water cooler bottles and kitchen pans. They enliven their act with acrobatic flair and frequent dashes of humor. They work for gotdrummers.com, a larger outfit which dispatches drummers across the country for a variety of gigs. A sheep gets meticulously sheared and primped by its owner's friends, getting ready to be judged. In a tent is the American Steam Engine Society's antique farm machinery show. One large vehicle is a "manure spreader" for politicians, built by Chuck Cusimano of Glen Rock, PA. At the front, over a mounted toilet, is a sign which reads "For BiPartican input only- For Politicians Use Only". Other signs aboard the tractor lampoon politicians, like the one with a cartoon of the Democrats' donkey and the Republicans' elephant and the words, "Sure you can trust us, Just ask an American Indian." The Society's 55th annual "Steamorama" runs October 4th through 7th, 2012, in York. In the men's room, two men who describe their work as "self contracting" are stationed at either end and pop up from their chairs to proffer a dry paper towel as soon as someone has finished washing their hands. In between times, one will sing while the other spray-wipes the urinals.
A muscular tout carries a hand-made poster saying "WHATYA BENCH?" and offering $100 prizes as he snakes his way through the fair, trying to lure people to the staging area where a hunk with a microphone cajoles bystanders to bench-press a 225-pound barbell or lift a 500-pound barbell with car tires at either end.

"I went to a party in West Virginia and it was a guy's retirement party. And he had this t-shirt on and it said "Glasgow, Scotland" because he was in the military, he was in the Marines, or Navy that one, one of three. And we're having a few drinks and someone says, you should go up to the microphone, it's Karaoke and say you're Kaley's son from Scotland. I said, "I can't do that." He says, "Come on." So eventually we had a whip around. And the whip around was up to sixty dollars and being Scotch, you can't turn down that money. So I went up to the microphone. I was like, "It's finally great to meet my Dad bringing me here to meet at his retirement party." Half the party knew I was doing it but the other half didn't. So it was like, "Chhhh!" But it turned out the guy knew, he found out this was going to happen. He stood up, come up and just hugged us for a good five minutes. I was standing there, I didn't know he knew. So I was kind of thinking this is great, he thinks, this guy doesn't know. And then his mother comes from nowhere so gives us a hug. "It's great to meet you now." "Oh, you too." And eventually I found out they knew. But later on I'm talking to this girl at the party and getting a bit closer to her. It's going all right. A guy comes up to me, "You all know she's your cousin." And I'm like, yeah. "It's alright now, you're in West Virginia." I thought it was brilliant." As related by a UK Elite soccer coach who, for this story, prefers to be identified as "John Deere."
# -*- coding: utf-8 -*- from __future__ import unicode_literals from raven.utils.testutils import TestCase from raven.base import Client # Some internal stuff to extend the transport layer from raven.transport import Transport from raven.transport.exceptions import DuplicateScheme # Simplify comparing dicts with primitive values: from raven.utils import json import datetime import calendar import pytz import zlib class DummyScheme(Transport): scheme = ['mock'] def __init__(self, parsed_url, timeout=5): self._parsed_url = parsed_url self.timeout = timeout def send(self, data, headers): """ Sends a request to a remote webserver """ self._data = data self._headers = headers class TransportTest(TestCase): def setUp(self): try: Client.register_scheme('mock', DummyScheme) except DuplicateScheme: pass def test_basic_config(self): c = Client( dsn="mock://some_username:some_password@localhost:8143/1?timeout=1", name="test_server" ) assert c.remote.options == { 'timeout': '1', } def test_custom_transport(self): c = Client(dsn="mock://some_username:some_password@localhost:8143/1") data = dict(a=42, b=55, c=list(range(50))) c.send(**data) mock_cls = c._transport_cache['mock://some_username:some_password@localhost:8143/1'].get_transport() expected_message = zlib.decompress(c.encode(data)) actual_message = zlib.decompress(mock_cls._data) # These loads()/dumps() pairs order the dict keys before comparing the string. # See GH504 self.assertEqual( json.dumps(json.loads(expected_message.decode('utf-8')), sort_keys=True), json.dumps(json.loads(actual_message.decode('utf-8')), sort_keys=True) ) def test_build_then_send(self): c = Client( dsn="mock://some_username:some_password@localhost:8143/1", name="test_server") mydate = datetime.datetime(2012, 5, 4, tzinfo=pytz.utc) d = calendar.timegm(mydate.timetuple()) msg = c.build_msg('raven.events.Message', message='foo', date=d) expected = { 'project': '1', 'sentry.interfaces.Message': {'message': 'foo', 'params': ()}, 'server_name': 'test_server', 'level': 40, 'tags': {}, 'time_spent': None, 'timestamp': 1336089600, 'message': 'foo', } # The event_id is always overridden del msg['event_id'] self.assertDictContainsSubset(expected, msg)
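For orientation, a hedged sketch of how a transport like the DummyScheme above might be registered and exercised outside the test suite. It assumes raven's standard captureMessage helper, a transport that sends synchronously, and the same register_scheme and _transport_cache access pattern used in these tests; the DSN, server name and message text are placeholders.

from raven.base import Client
from raven.transport.exceptions import DuplicateScheme

dsn = "mock://some_username:some_password@localhost:8143/1"

try:
    Client.register_scheme('mock', DummyScheme)
except DuplicateScheme:
    pass  # already registered, e.g. by an earlier import

client = Client(dsn=dsn, name="example_server")
client.captureMessage("something happened")

# The DummyScheme instance now holds the most recent payload and headers
# handed to its send() method.
transport = client._transport_cache[dsn].get_transport()
print(transport._headers)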
Quepo is a non-profit audiovisual organization that uses strategic communication to create social change. We produce videos, documentaries and fiction to raise awareness of the need for citizen participation and social action. We want to take part in building a more democratic global society, one that involves us all in the defense of social justice and respect for human rights and the environment. We contribute our professional experience to bring social movements and NGOs closer to civil society through audiovisual projects with social content. The Observatori del Deute en la Globalització (ODG) is an activist research center that studies the North-South debts generated between communities in the current process of globalization. At ODG we work to gather evidence on, and try to reverse, the mechanisms that perpetuate impoverishment in the South and North-South imbalances. We want to show the complexities and responsibilities behind issues such as debt and the environmental and social impacts of the investments of Spanish transnational corporations in the Global South. To achieve this we carry out research and monitor policies and actors such as the Spanish Government, transnational corporations and international organizations. Based on the results of this research we pursue capacity building, public awareness, campaigning and advocacy actions.
# Copyright 2012 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo.config import cfg from oslo.db import exception as db_exc from oslo.utils import excutils from sqlalchemy import exc as sql_exc from sqlalchemy.orm import exc as sa_exc import webob.exc from neutron.api import extensions as neutron_extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import utils from neutron import context as q_context from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import db_base_plugin_v2 from neutron.db import external_net_db from neutron.db import extraroute_db from neutron.db import l3_db from neutron.db import l3_dvr_db from neutron.db import l3_gwmode_db from neutron.db import models_v2 from neutron.db import portbindings_db from neutron.db import portsecurity_db from neutron.db import quota_db # noqa from neutron.db import securitygroups_db from neutron.extensions import allowedaddresspairs as addr_pair from neutron.extensions import external_net as ext_net_extn from neutron.extensions import extraroute from neutron.extensions import l3 from neutron.extensions import multiprovidernet as mpnet from neutron.extensions import portbindings as pbin from neutron.extensions import portsecurity as psec from neutron.extensions import providernet as pnet from neutron.extensions import securitygroup as ext_sg from neutron.i18n import _LE, _LI, _LW from neutron.openstack.common import lockutils from neutron.openstack.common import log as logging from neutron.plugins.common import constants as plugin_const from neutron.plugins import vmware from neutron.plugins.vmware.api_client import exception as api_exc from neutron.plugins.vmware.common import config # noqa from neutron.plugins.vmware.common import exceptions as nsx_exc from neutron.plugins.vmware.common import nsx_utils from neutron.plugins.vmware.common import securitygroups as sg_utils from neutron.plugins.vmware.common import sync from neutron.plugins.vmware.common import utils as c_utils from neutron.plugins.vmware.dbexts import db as nsx_db from neutron.plugins.vmware.dbexts import maclearning as mac_db from neutron.plugins.vmware.dbexts import networkgw_db from neutron.plugins.vmware.dbexts import qos_db from neutron.plugins.vmware import dhcpmeta_modes from neutron.plugins.vmware.extensions import maclearning as mac_ext from neutron.plugins.vmware.extensions import networkgw from neutron.plugins.vmware.extensions import qos from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib from neutron.plugins.vmware.nsxlib import queue as queuelib from neutron.plugins.vmware.nsxlib import router as routerlib from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib from neutron.plugins.vmware.nsxlib import switch as switchlib LOG = logging.getLogger(__name__) NSX_NOSNAT_RULES_ORDER = 10 
NSX_FLOATINGIP_NAT_RULES_ORDER = 224 NSX_EXTGW_NAT_RULES_ORDER = 255 NSX_DEFAULT_NEXTHOP = '1.1.1.1' class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin, agentschedulers_db.DhcpAgentSchedulerDbMixin, db_base_plugin_v2.NeutronDbPluginV2, dhcpmeta_modes.DhcpMetadataAccess, l3_dvr_db.L3_NAT_with_dvr_db_mixin, external_net_db.External_net_db_mixin, extraroute_db.ExtraRoute_db_mixin, l3_gwmode_db.L3_NAT_db_mixin, mac_db.MacLearningDbMixin, networkgw_db.NetworkGatewayMixin, portbindings_db.PortBindingMixin, portsecurity_db.PortSecurityDbMixin, qos_db.QoSDbMixin, securitygroups_db.SecurityGroupDbMixin): supported_extension_aliases = ["allowed-address-pairs", "binding", "dvr", "ext-gw-mode", "extraroute", "mac-learning", "multi-provider", "network-gateway", "nvp-qos", "port-security", "provider", "qos-queue", "quotas", "external-net", "router", "security-group"] __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True # Map nova zones to cluster for easy retrieval novazone_cluster_map = {} def __init__(self): super(NsxPluginV2, self).__init__() config.validate_config_options() # TODO(salv-orlando): Replace These dicts with # collections.defaultdict for better handling of default values # Routines for managing logical ports in NSX self.port_special_owners = [l3_db.DEVICE_OWNER_ROUTER_GW, l3_db.DEVICE_OWNER_ROUTER_INTF] self._port_drivers = { 'create': {l3_db.DEVICE_OWNER_ROUTER_GW: self._nsx_create_ext_gw_port, l3_db.DEVICE_OWNER_FLOATINGIP: self._nsx_create_fip_port, l3_db.DEVICE_OWNER_ROUTER_INTF: self._nsx_create_router_port, networkgw_db.DEVICE_OWNER_NET_GW_INTF: self._nsx_create_l2_gw_port, 'default': self._nsx_create_port}, 'delete': {l3_db.DEVICE_OWNER_ROUTER_GW: self._nsx_delete_ext_gw_port, l3_db.DEVICE_OWNER_ROUTER_INTF: self._nsx_delete_router_port, l3_db.DEVICE_OWNER_FLOATINGIP: self._nsx_delete_fip_port, networkgw_db.DEVICE_OWNER_NET_GW_INTF: self._nsx_delete_port, 'default': self._nsx_delete_port} } neutron_extensions.append_api_extensions_path([vmware.NSX_EXT_PATH]) self.nsx_opts = cfg.CONF.NSX self.nsx_sync_opts = cfg.CONF.NSX_SYNC self.cluster = nsx_utils.create_nsx_cluster( cfg.CONF, self.nsx_opts.concurrent_connections, self.nsx_opts.nsx_gen_timeout) self.base_binding_dict = { pbin.VIF_TYPE: pbin.VIF_TYPE_OVS, pbin.VIF_DETAILS: { # TODO(rkukura): Replace with new VIF security details pbin.CAP_PORT_FILTER: 'security-group' in self.supported_extension_aliases}} self._extend_fault_map() self.setup_dhcpmeta_access() # Set this flag to false as the default gateway has not # been yet updated from the config file self._is_default_net_gw_in_sync = False # Create a synchronizer instance for backend sync self._synchronizer = sync.NsxSynchronizer( self.safe_reference, self.cluster, self.nsx_sync_opts.state_sync_interval, self.nsx_sync_opts.min_sync_req_delay, self.nsx_sync_opts.min_chunk_size, self.nsx_sync_opts.max_random_sync_delay) def _ensure_default_network_gateway(self): if self._is_default_net_gw_in_sync: return # Add the gw in the db as default, and unset any previous default def_l2_gw_uuid = self.cluster.default_l2_gw_service_uuid try: ctx = q_context.get_admin_context() self._unset_default_network_gateways(ctx) if not def_l2_gw_uuid: return try: def_network_gw = self._get_network_gateway(ctx, def_l2_gw_uuid) except networkgw_db.GatewayNotFound: # Create in DB only - don't go to backend def_gw_data = {'id': def_l2_gw_uuid, 'name': 'default L2 gateway service', 'devices': []} gw_res_name = networkgw.GATEWAY_RESOURCE_NAME.replace('-', '_') 
def_network_gw = super( NsxPluginV2, self).create_network_gateway( ctx, {gw_res_name: def_gw_data}) # In any case set is as default self._set_default_network_gateway(ctx, def_network_gw['id']) # Ensure this method is executed only once self._is_default_net_gw_in_sync = True except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE("Unable to process default l2 gw service: " "%s"), def_l2_gw_uuid) def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None): """Build ip_addresses data structure for logical router port. No need to perform validation on IPs - this has already been done in the l3_db mixin class. """ ip_addresses = [] for ip in fixed_ips: if not subnet_ids or (ip['subnet_id'] in subnet_ids): subnet = self._get_subnet(context, ip['subnet_id']) ip_prefix = '%s/%s' % (ip['ip_address'], subnet['cidr'].split('/')[1]) ip_addresses.append(ip_prefix) return ip_addresses def _create_and_attach_router_port(self, cluster, context, nsx_router_id, port_data, attachment_type, attachment, attachment_vlan=None, subnet_ids=None): # Use a fake IP address if gateway port is not 'real' ip_addresses = (port_data.get('fake_ext_gw') and ['0.0.0.0/31'] or self._build_ip_address_list(context, port_data['fixed_ips'], subnet_ids)) try: lrouter_port = routerlib.create_router_lport( cluster, nsx_router_id, port_data.get('tenant_id', 'fake'), port_data.get('id', 'fake'), port_data.get('name', 'fake'), port_data.get('admin_state_up', True), ip_addresses, port_data.get('mac_address')) LOG.debug("Created NSX router port:%s", lrouter_port['uuid']) except api_exc.NsxApiException: LOG.exception(_LE("Unable to create port on NSX logical router " "%s"), nsx_router_id) raise nsx_exc.NsxPluginException( err_msg=_("Unable to create logical router port for neutron " "port id %(port_id)s on router %(nsx_router_id)s") % {'port_id': port_data.get('id'), 'nsx_router_id': nsx_router_id}) self._update_router_port_attachment(cluster, context, nsx_router_id, port_data, lrouter_port['uuid'], attachment_type, attachment, attachment_vlan) return lrouter_port def _update_router_gw_info(self, context, router_id, info): # NOTE(salvatore-orlando): We need to worry about rollback of NSX # configuration in case of failures in the process # Ref. 
LP bug 1102301 router = self._get_router(context, router_id) # Check whether SNAT rule update should be triggered # NSX also supports multiple external networks so there is also # the possibility that NAT rules should be replaced current_ext_net_id = router.gw_port_id and router.gw_port.network_id new_ext_net_id = info and info.get('network_id') # SNAT should be enabled unless info['enable_snat'] is # explicitly set to false enable_snat = new_ext_net_id and info.get('enable_snat', True) # Remove if ext net removed, changed, or if snat disabled remove_snat_rules = (current_ext_net_id and new_ext_net_id != current_ext_net_id or router.enable_snat and not enable_snat) # Add rules if snat is enabled, and if either the external network # changed or snat was previously disabled # NOTE: enable_snat == True implies new_ext_net_id != None add_snat_rules = (enable_snat and (new_ext_net_id != current_ext_net_id or not router.enable_snat)) router = super(NsxPluginV2, self)._update_router_gw_info( context, router_id, info, router=router) # Add/Remove SNAT rules as needed # Create an elevated context for dealing with metadata access # cidrs which are created within admin context ctx_elevated = context.elevated() if remove_snat_rules or add_snat_rules: cidrs = self._find_router_subnets_cidrs(ctx_elevated, router_id) nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) if remove_snat_rules: # Be safe and concede NAT rules might not exist. # Therefore use min_num_expected=0 for cidr in cidrs: routerlib.delete_nat_rules_by_match( self.cluster, nsx_router_id, "SourceNatRule", max_num_expected=1, min_num_expected=0, raise_on_len_mismatch=False, source_ip_addresses=cidr) if add_snat_rules: ip_addresses = self._build_ip_address_list( ctx_elevated, router.gw_port['fixed_ips']) # Set the SNAT rule for each subnet (only first IP) for cidr in cidrs: cidr_prefix = int(cidr.split('/')[1]) routerlib.create_lrouter_snat_rule( self.cluster, nsx_router_id, ip_addresses[0].split('/')[0], ip_addresses[0].split('/')[0], order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix, match_criteria={'source_ip_addresses': cidr}) def _update_router_port_attachment(self, cluster, context, nsx_router_id, port_data, nsx_router_port_id, attachment_type, attachment, attachment_vlan=None): if not nsx_router_port_id: nsx_router_port_id = self._find_router_gw_port(context, port_data) try: routerlib.plug_router_port_attachment(cluster, nsx_router_id, nsx_router_port_id, attachment, attachment_type, attachment_vlan) LOG.debug("Attached %(att)s to NSX router port %(port)s", {'att': attachment, 'port': nsx_router_port_id}) except api_exc.NsxApiException: # Must remove NSX logical port routerlib.delete_router_lport(cluster, nsx_router_id, nsx_router_port_id) LOG.exception(_LE("Unable to plug attachment in NSX logical " "router port %(r_port_id)s, associated with " "Neutron %(q_port_id)s"), {'r_port_id': nsx_router_port_id, 'q_port_id': port_data.get('id')}) raise nsx_exc.NsxPluginException( err_msg=(_("Unable to plug attachment in router port " "%(r_port_id)s for neutron port id %(q_port_id)s " "on router %(router_id)s") % {'r_port_id': nsx_router_port_id, 'q_port_id': port_data.get('id'), 'router_id': nsx_router_id})) def _get_port_by_device_id(self, context, device_id, device_owner): """Retrieve ports associated with a specific device id. Used for retrieving all neutron ports attached to a given router. 
""" port_qry = context.session.query(models_v2.Port) return port_qry.filter_by( device_id=device_id, device_owner=device_owner,).all() def _find_router_subnets_cidrs(self, context, router_id): """Retrieve subnets attached to the specified router.""" ports = self._get_port_by_device_id(context, router_id, l3_db.DEVICE_OWNER_ROUTER_INTF) # No need to check for overlapping CIDRs cidrs = [] for port in ports: for ip in port.get('fixed_ips', []): cidrs.append(self._get_subnet(context, ip.subnet_id).cidr) return cidrs def _nsx_find_lswitch_for_port(self, context, port_data): network = self._get_network(context, port_data['network_id']) network_bindings = nsx_db.get_network_bindings( context.session, port_data['network_id']) max_ports = self.nsx_opts.max_lp_per_overlay_ls allow_extra_lswitches = False for network_binding in network_bindings: if network_binding.binding_type in (c_utils.NetworkTypes.FLAT, c_utils.NetworkTypes.VLAN): max_ports = self.nsx_opts.max_lp_per_bridged_ls allow_extra_lswitches = True break try: return self._handle_lswitch_selection( context, self.cluster, network, network_bindings, max_ports, allow_extra_lswitches) except api_exc.NsxApiException: err_desc = _("An exception occurred while selecting logical " "switch for the port") LOG.exception(err_desc) raise nsx_exc.NsxPluginException(err_msg=err_desc) def _nsx_create_port_helper(self, session, ls_uuid, port_data, do_port_security=True): # Convert Neutron security groups identifiers into NSX security # profiles identifiers nsx_sec_profile_ids = [ nsx_utils.get_nsx_security_group_id( session, self.cluster, neutron_sg_id) for neutron_sg_id in (port_data[ext_sg.SECURITYGROUPS] or [])] return switchlib.create_lport(self.cluster, ls_uuid, port_data['tenant_id'], port_data['id'], port_data['name'], port_data['device_id'], port_data['admin_state_up'], port_data['mac_address'], port_data['fixed_ips'], port_data[psec.PORTSECURITY], nsx_sec_profile_ids, port_data.get(qos.QUEUE), port_data.get(mac_ext.MAC_LEARNING), port_data.get(addr_pair.ADDRESS_PAIRS)) def _handle_create_port_exception(self, context, port_id, ls_uuid, lp_uuid): with excutils.save_and_reraise_exception(): # rollback nsx logical port only if it was successfully # created on NSX. Should this command fail the original # exception will be raised. if lp_uuid: # Remove orphaned port from NSX switchlib.delete_port(self.cluster, ls_uuid, lp_uuid) # rollback the neutron-nsx port mapping nsx_db.delete_neutron_nsx_port_mapping(context.session, port_id) LOG.exception(_LE("An exception occurred while creating the " "neutron port %s on the NSX plaform"), port_id) def _nsx_create_port(self, context, port_data): """Driver for creating a logical switch port on NSX platform.""" # FIXME(salvatore-orlando): On the NSX platform we do not really have # external networks. So if as user tries and create a "regular" VIF # port on an external network we are unable to actually create. # However, in order to not break unit tests, we need to still create # the DB object and return success if self._network_is_external(context, port_data['network_id']): LOG.info(_LI("NSX plugin does not support regular VIF ports on " "external networks. 
Port %s will be down."), port_data['network_id']) # No need to actually update the DB state - the default is down return port_data lport = None selected_lswitch = None try: selected_lswitch = self._nsx_find_lswitch_for_port(context, port_data) lport = self._nsx_create_port_helper(context.session, selected_lswitch['uuid'], port_data, True) nsx_db.add_neutron_nsx_port_mapping( context.session, port_data['id'], selected_lswitch['uuid'], lport['uuid']) if port_data['device_owner'] not in self.port_special_owners: switchlib.plug_vif_interface( self.cluster, selected_lswitch['uuid'], lport['uuid'], "VifAttachment", port_data['id']) LOG.debug("_nsx_create_port completed for port %(name)s " "on network %(network_id)s. The new port id is " "%(id)s.", port_data) except (api_exc.NsxApiException, n_exc.NeutronException): self._handle_create_port_exception( context, port_data['id'], selected_lswitch and selected_lswitch['uuid'], lport and lport['uuid']) except db_exc.DBError as e: if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and isinstance(e.inner_exception, sql_exc.IntegrityError)): LOG.warning( _LW("Concurrent network deletion detected; Back-end " "Port %(nsx_id)s creation to be rolled back for " "Neutron port: %(neutron_id)s"), {'nsx_id': lport['uuid'], 'neutron_id': port_data['id']}) if selected_lswitch and lport: try: switchlib.delete_port(self.cluster, selected_lswitch['uuid'], lport['uuid']) except n_exc.NotFound: LOG.debug("NSX Port %s already gone", lport['uuid']) def _nsx_delete_port(self, context, port_data): # FIXME(salvatore-orlando): On the NSX platform we do not really have # external networks. So deleting regular ports from external networks # does not make sense. However we cannot raise as this would break # unit tests. if self._network_is_external(context, port_data['network_id']): LOG.info(_LI("NSX plugin does not support regular VIF ports on " "external networks. Port %s will be down."), port_data['network_id']) return nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( context.session, self.cluster, port_data['id']) if not nsx_port_id: LOG.debug("Port '%s' was already deleted on NSX platform", id) return # TODO(bgh): if this is a bridged network and the lswitch we just got # back will have zero ports after the delete we should garbage collect # the lswitch. try: switchlib.delete_port(self.cluster, nsx_switch_id, nsx_port_id) LOG.debug("_nsx_delete_port completed for port %(port_id)s " "on network %(net_id)s", {'port_id': port_data['id'], 'net_id': port_data['network_id']}) except n_exc.NotFound: LOG.warning(_LW("Port %s not found in NSX"), port_data['id']) def _nsx_delete_router_port(self, context, port_data): # Delete logical router port nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, port_data['device_id']) nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( context.session, self.cluster, port_data['id']) if not nsx_port_id: LOG.warn( _LW("Neutron port %(port_id)s not found on NSX backend. " "Terminating delete operation. 
A dangling router port " "might have been left on router %(router_id)s"), {'port_id': port_data['id'], 'router_id': nsx_router_id}) return try: routerlib.delete_peer_router_lport(self.cluster, nsx_router_id, nsx_switch_id, nsx_port_id) except api_exc.NsxApiException: # Do not raise because the issue might as well be that the # router has already been deleted, so there would be nothing # to do here LOG.exception(_LE("Ignoring exception as this means the peer " "for port '%s' has already been deleted."), nsx_port_id) # Delete logical switch port self._nsx_delete_port(context, port_data) def _nsx_create_router_port(self, context, port_data): """Driver for creating a switch port to be connected to a router.""" # No router ports on external networks! if self._network_is_external(context, port_data['network_id']): raise nsx_exc.NsxPluginException( err_msg=(_("It is not allowed to create router interface " "ports on external networks as '%s'") % port_data['network_id'])) ls_port = None selected_lswitch = None try: selected_lswitch = self._nsx_find_lswitch_for_port( context, port_data) # Do not apply port security here! ls_port = self._nsx_create_port_helper( context.session, selected_lswitch['uuid'], port_data, False) # Assuming subnet being attached is on first fixed ip # element in port data subnet_id = port_data['fixed_ips'][0]['subnet_id'] nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, port_data['device_id']) # Create peer port on logical router self._create_and_attach_router_port( self.cluster, context, nsx_router_id, port_data, "PatchAttachment", ls_port['uuid'], subnet_ids=[subnet_id]) nsx_db.add_neutron_nsx_port_mapping( context.session, port_data['id'], selected_lswitch['uuid'], ls_port['uuid']) LOG.debug("_nsx_create_router_port completed for port " "%(name)s on network %(network_id)s. The new " "port id is %(id)s.", port_data) except (api_exc.NsxApiException, n_exc.NeutronException): self._handle_create_port_exception( context, port_data['id'], selected_lswitch and selected_lswitch['uuid'], ls_port and ls_port['uuid']) def _find_router_gw_port(self, context, port_data): router_id = port_data['device_id'] if not router_id: raise n_exc.BadRequest(_("device_id field must be populated in " "order to create an external gateway " "port for network %s"), port_data['network_id']) nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) lr_port = routerlib.find_router_gw_port(context, self.cluster, nsx_router_id) if not lr_port: raise nsx_exc.NsxPluginException( err_msg=(_("The gateway port for the NSX router %s " "was not found on the backend") % nsx_router_id)) return lr_port @lockutils.synchronized('vmware', 'neutron-') def _nsx_create_ext_gw_port(self, context, port_data): """Driver for creating an external gateway port on NSX platform.""" # TODO(salvatore-orlando): Handle NSX resource # rollback when something goes not quite as expected lr_port = self._find_router_gw_port(context, port_data) ip_addresses = self._build_ip_address_list(context, port_data['fixed_ips']) # This operation actually always updates a NSX logical port # instead of creating one. This is because the gateway port # is created at the same time as the NSX logical router, otherwise # the fabric status of the NSX router will be down. 
# admin_status should always be up for the gateway port # regardless of what the user specifies in neutron nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, port_data['device_id']) routerlib.update_router_lport(self.cluster, nsx_router_id, lr_port['uuid'], port_data['tenant_id'], port_data['id'], port_data['name'], True, ip_addresses) ext_network = self.get_network(context, port_data['network_id']) if ext_network.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.L3_EXT: # Update attachment physical_network = (ext_network[pnet.PHYSICAL_NETWORK] or self.cluster.default_l3_gw_service_uuid) self._update_router_port_attachment( self.cluster, context, nsx_router_id, port_data, lr_port['uuid'], "L3GatewayAttachment", physical_network, ext_network[pnet.SEGMENTATION_ID]) LOG.debug("_nsx_create_ext_gw_port completed on external network " "%(ext_net_id)s, attached to router:%(router_id)s. " "NSX port id is %(nsx_port_id)s", {'ext_net_id': port_data['network_id'], 'router_id': nsx_router_id, 'nsx_port_id': lr_port['uuid']}) @lockutils.synchronized('vmware', 'neutron-') def _nsx_delete_ext_gw_port(self, context, port_data): lr_port = self._find_router_gw_port(context, port_data) # TODO(salvatore-orlando): Handle NSX resource # rollback when something goes not quite as expected try: # Delete is actually never a real delete, otherwise the NSX # logical router will stop working router_id = port_data['device_id'] nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) routerlib.update_router_lport(self.cluster, nsx_router_id, lr_port['uuid'], port_data['tenant_id'], port_data['id'], port_data['name'], True, ['0.0.0.0/31']) # Reset attachment self._update_router_port_attachment( self.cluster, context, nsx_router_id, port_data, lr_port['uuid'], "L3GatewayAttachment", self.cluster.default_l3_gw_service_uuid) except api_exc.ResourceNotFound: raise nsx_exc.NsxPluginException( err_msg=_("Logical router resource %s not found " "on NSX platform") % router_id) except api_exc.NsxApiException: raise nsx_exc.NsxPluginException( err_msg=_("Unable to update logical router" "on NSX Platform")) LOG.debug("_nsx_delete_ext_gw_port completed on external network " "%(ext_net_id)s, attached to NSX router:%(router_id)s", {'ext_net_id': port_data['network_id'], 'router_id': nsx_router_id}) def _nsx_create_l2_gw_port(self, context, port_data): """Create a switch port, and attach it to a L2 gateway attachment.""" # FIXME(salvatore-orlando): On the NSX platform we do not really have # external networks. So if as user tries and create a "regular" VIF # port on an external network we are unable to actually create. # However, in order to not break unit tests, we need to still create # the DB object and return success if self._network_is_external(context, port_data['network_id']): LOG.info(_LI("NSX plugin does not support regular VIF ports on " "external networks. 
Port %s will be down."), port_data['network_id']) # No need to actually update the DB state - the default is down return port_data lport = None try: selected_lswitch = self._nsx_find_lswitch_for_port( context, port_data) lport = self._nsx_create_port_helper( context.session, selected_lswitch['uuid'], port_data, True) nsx_db.add_neutron_nsx_port_mapping( context.session, port_data['id'], selected_lswitch['uuid'], lport['uuid']) l2gwlib.plug_l2_gw_service( self.cluster, selected_lswitch['uuid'], lport['uuid'], port_data['device_id'], int(port_data.get('gw:segmentation_id') or 0)) except Exception: with excutils.save_and_reraise_exception(): if lport: switchlib.delete_port(self.cluster, selected_lswitch['uuid'], lport['uuid']) LOG.debug("_nsx_create_l2_gw_port completed for port %(name)s " "on network %(network_id)s. The new port id " "is %(id)s.", port_data) def _nsx_create_fip_port(self, context, port_data): # As we do not create ports for floating IPs in NSX, # this is a no-op driver pass def _nsx_delete_fip_port(self, context, port_data): # As we do not create ports for floating IPs in NSX, # this is a no-op driver pass def _extend_fault_map(self): """Extends the Neutron Fault Map. Exceptions specific to the NSX Plugin are mapped to standard HTTP Exceptions. """ base.FAULT_MAP.update({nsx_exc.InvalidNovaZone: webob.exc.HTTPBadRequest, nsx_exc.NoMorePortsException: webob.exc.HTTPBadRequest, nsx_exc.MaintenanceInProgress: webob.exc.HTTPServiceUnavailable, nsx_exc.InvalidSecurityCertificate: webob.exc.HTTPBadRequest}) def _validate_provider_create(self, context, network): segments = network.get(mpnet.SEGMENTS) if not attr.is_attr_set(segments): return mpnet.check_duplicate_segments(segments) for segment in segments: network_type = segment.get(pnet.NETWORK_TYPE) physical_network = segment.get(pnet.PHYSICAL_NETWORK) physical_network_set = attr.is_attr_set(physical_network) segmentation_id = segment.get(pnet.SEGMENTATION_ID) network_type_set = attr.is_attr_set(network_type) segmentation_id_set = attr.is_attr_set(segmentation_id) # If the physical_network_uuid isn't passed in use the default one. 
if not physical_network_set: physical_network = cfg.CONF.default_tz_uuid err_msg = None if not network_type_set: err_msg = _("%s required") % pnet.NETWORK_TYPE elif network_type in (c_utils.NetworkTypes.GRE, c_utils.NetworkTypes.STT, c_utils.NetworkTypes.FLAT): if segmentation_id_set: err_msg = _("Segmentation ID cannot be specified with " "flat network type") elif network_type == c_utils.NetworkTypes.VLAN: if not segmentation_id_set: err_msg = _("Segmentation ID must be specified with " "vlan network type") elif (segmentation_id_set and not utils.is_valid_vlan_tag(segmentation_id)): err_msg = (_("%(segmentation_id)s out of range " "(%(min_id)s through %(max_id)s)") % {'segmentation_id': segmentation_id, 'min_id': constants.MIN_VLAN_TAG, 'max_id': constants.MAX_VLAN_TAG}) else: # Verify segment is not already allocated bindings = ( nsx_db.get_network_bindings_by_vlanid_and_physical_net( context.session, segmentation_id, physical_network) ) if bindings: raise n_exc.VlanIdInUse( vlan_id=segmentation_id, physical_network=physical_network) elif network_type == c_utils.NetworkTypes.L3_EXT: if (segmentation_id_set and not utils.is_valid_vlan_tag(segmentation_id)): err_msg = (_("%(segmentation_id)s out of range " "(%(min_id)s through %(max_id)s)") % {'segmentation_id': segmentation_id, 'min_id': constants.MIN_VLAN_TAG, 'max_id': constants.MAX_VLAN_TAG}) else: err_msg = (_("%(net_type_param)s %(net_type_value)s not " "supported") % {'net_type_param': pnet.NETWORK_TYPE, 'net_type_value': network_type}) if err_msg: raise n_exc.InvalidInput(error_message=err_msg) # TODO(salvatore-orlando): Validate tranport zone uuid # which should be specified in physical_network def _extend_network_dict_provider(self, context, network, multiprovider=None, bindings=None): if not bindings: bindings = nsx_db.get_network_bindings(context.session, network['id']) if not multiprovider: multiprovider = nsx_db.is_multiprovider_network(context.session, network['id']) # With NSX plugin 'normal' overlay networks will have no binding # TODO(salvatore-orlando) make sure users can specify a distinct # phy_uuid as 'provider network' for STT net type if bindings: if not multiprovider: # network came in through provider networks api network[pnet.NETWORK_TYPE] = bindings[0].binding_type network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id else: # network come in though multiprovider networks api network[mpnet.SEGMENTS] = [ {pnet.NETWORK_TYPE: binding.binding_type, pnet.PHYSICAL_NETWORK: binding.phy_uuid, pnet.SEGMENTATION_ID: binding.vlan_id} for binding in bindings] def extend_port_dict_binding(self, port_res, port_db): super(NsxPluginV2, self).extend_port_dict_binding(port_res, port_db) port_res[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL def _handle_lswitch_selection(self, context, cluster, network, network_bindings, max_ports, allow_extra_lswitches): lswitches = nsx_utils.fetch_nsx_switches( context.session, cluster, network.id) try: return [ls for ls in lswitches if (ls['_relations']['LogicalSwitchStatus'] ['lport_count'] < max_ports)].pop(0) except IndexError: # Too bad, no switch available LOG.debug("No switch has available ports (%d checked)", len(lswitches)) if allow_extra_lswitches: # The 'main' logical switch is either the only one available # or the one where the 'multi_lswitch' tag was set while lswitches: main_ls = lswitches.pop(0) tag_dict = dict((x['scope'], x['tag']) for x in main_ls['tags']) if 'multi_lswitch' in tag_dict: break else: # by construction this statement is 
hit if there is only one # logical switch and the multi_lswitch tag has not been set. # The tag must therefore be added. tags = main_ls['tags'] tags.append({'tag': 'True', 'scope': 'multi_lswitch'}) switchlib.update_lswitch(cluster, main_ls['uuid'], main_ls['display_name'], network['tenant_id'], tags=tags) transport_zone_config = self._convert_to_nsx_transport_zones( cluster, network, bindings=network_bindings) selected_lswitch = switchlib.create_lswitch( cluster, network.id, network.tenant_id, "%s-ext-%s" % (network.name, len(lswitches)), transport_zone_config) # add a mapping between the neutron network and the newly # created logical switch nsx_db.add_neutron_nsx_network_mapping( context.session, network.id, selected_lswitch['uuid']) return selected_lswitch else: LOG.error(_LE("Maximum number of logical ports reached for " "logical network %s"), network.id) raise nsx_exc.NoMorePortsException(network=network.id) def _convert_to_nsx_transport_zones(self, cluster, network=None, bindings=None): # TODO(salv-orlando): Remove this method and call nsx-utils direct return nsx_utils.convert_to_nsx_transport_zones( cluster.default_tz_uuid, network, bindings, default_transport_type=cfg.CONF.NSX.default_transport_type) def _convert_to_transport_zones_dict(self, network): """Converts the provider request body to multiprovider. Returns: True if request is multiprovider False if provider and None if neither. """ if any(attr.is_attr_set(network.get(f)) for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)): if attr.is_attr_set(network.get(mpnet.SEGMENTS)): raise mpnet.SegmentsSetInConjunctionWithProviders() # convert to transport zone list network[mpnet.SEGMENTS] = [ {pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE], pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK], pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}] del network[pnet.NETWORK_TYPE] del network[pnet.PHYSICAL_NETWORK] del network[pnet.SEGMENTATION_ID] return False if attr.is_attr_set(mpnet.SEGMENTS): return True def create_network(self, context, network): net_data = network['network'] tenant_id = self._get_tenant_id_for_create(context, net_data) self._ensure_default_security_group(context, tenant_id) # Process the provider network extension provider_type = self._convert_to_transport_zones_dict(net_data) self._validate_provider_create(context, net_data) # Replace ATTR_NOT_SPECIFIED with None before sending to NSX for key, value in network['network'].iteritems(): if value is attr.ATTR_NOT_SPECIFIED: net_data[key] = None # FIXME(arosen) implement admin_state_up = False in NSX if net_data['admin_state_up'] is False: LOG.warning(_LW("Network with admin_state_up=False are not yet " "supported by this plugin. Ignoring setting for " "network %s"), net_data.get('name', '<unknown>')) transport_zone_config = self._convert_to_nsx_transport_zones( self.cluster, net_data) external = net_data.get(ext_net_extn.EXTERNAL) # NOTE(salv-orlando): Pre-generating uuid for Neutron # network. 
This will be removed once the network create operation # becomes an asynchronous task net_data['id'] = str(uuid.uuid4()) if (not attr.is_attr_set(external) or attr.is_attr_set(external) and not external): lswitch = switchlib.create_lswitch( self.cluster, net_data['id'], tenant_id, net_data.get('name'), transport_zone_config, shared=net_data.get(attr.SHARED)) with context.session.begin(subtransactions=True): new_net = super(NsxPluginV2, self).create_network(context, network) # Process port security extension self._process_network_port_security_create( context, net_data, new_net) # DB Operations for setting the network as external self._process_l3_create(context, new_net, net_data) # Process QoS queue extension net_queue_id = net_data.get(qos.QUEUE) if net_queue_id: # Raises if not found self.get_qos_queue(context, net_queue_id) self._process_network_queue_mapping( context, new_net, net_queue_id) # Add mapping between neutron network and NSX switch if (not attr.is_attr_set(external) or attr.is_attr_set(external) and not external): nsx_db.add_neutron_nsx_network_mapping( context.session, new_net['id'], lswitch['uuid']) if (net_data.get(mpnet.SEGMENTS) and isinstance(provider_type, bool)): net_bindings = [] for tz in net_data[mpnet.SEGMENTS]: segmentation_id = tz.get(pnet.SEGMENTATION_ID, 0) segmentation_id_set = attr.is_attr_set(segmentation_id) if not segmentation_id_set: segmentation_id = 0 net_bindings.append(nsx_db.add_network_binding( context.session, new_net['id'], tz.get(pnet.NETWORK_TYPE), tz.get(pnet.PHYSICAL_NETWORK), segmentation_id)) if provider_type: nsx_db.set_multiprovider_network(context.session, new_net['id']) self._extend_network_dict_provider(context, new_net, provider_type, net_bindings) self.handle_network_dhcp_access(context, new_net, action='create_network') return new_net def delete_network(self, context, id): external = self._network_is_external(context, id) # Before removing entry from Neutron DB, retrieve NSX switch # identifiers for removing them from backend if not external: lswitch_ids = nsx_utils.get_nsx_switch_ids( context.session, self.cluster, id) with context.session.begin(subtransactions=True): self._process_l3_delete(context, id) nsx_db.delete_network_bindings(context.session, id) super(NsxPluginV2, self).delete_network(context, id) # Do not go to NSX for external networks if not external: try: switchlib.delete_networks(self.cluster, id, lswitch_ids) except n_exc.NotFound: LOG.warning(_LW("The following logical switches were not " "found on the NSX backend:%s"), lswitch_ids) self.handle_network_dhcp_access(context, id, action='delete_network') LOG.debug("Delete network complete for network: %s", id) def get_network(self, context, id, fields=None): with context.session.begin(subtransactions=True): # goto to the plugin DB and fetch the network network = self._get_network(context, id) if (self.nsx_sync_opts.always_read_status or fields and 'status' in fields): # External networks are not backed by nsx lswitches if not network.external: # Perform explicit state synchronization self._synchronizer.synchronize_network(context, network) # Don't do field selection here otherwise we won't be able # to add provider networks fields net_result = self._make_network_dict(network) self._extend_network_dict_provider(context, net_result) return self._fields(net_result, fields) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} with context.session.begin(subtransactions=True): networks 
= ( super(NsxPluginV2, self).get_networks( context, filters, fields, sorts, limit, marker, page_reverse)) for net in networks: self._extend_network_dict_provider(context, net) return [self._fields(network, fields) for network in networks] def update_network(self, context, id, network): pnet._raise_if_updates_provider_attributes(network['network']) if network["network"].get("admin_state_up") is False: raise NotImplementedError(_("admin_state_up=False networks " "are not supported.")) with context.session.begin(subtransactions=True): net = super(NsxPluginV2, self).update_network(context, id, network) if psec.PORTSECURITY in network['network']: self._process_network_port_security_update( context, network['network'], net) net_queue_id = network['network'].get(qos.QUEUE) if net_queue_id: self._delete_network_queue_mapping(context, id) self._process_network_queue_mapping(context, net, net_queue_id) self._process_l3_update(context, net, network['network']) self._extend_network_dict_provider(context, net) # If provided, update port name on backend; treat backend failures as # not critical (log error, but do not raise) if 'name' in network['network']: # in case of chained switches update name only for the first one nsx_switch_ids = nsx_utils.get_nsx_switch_ids( context.session, self.cluster, id) if not nsx_switch_ids or len(nsx_switch_ids) < 1: LOG.warn(_LW("Unable to find NSX mappings for neutron " "network:%s"), id) try: switchlib.update_lswitch(self.cluster, nsx_switch_ids[0], network['network']['name']) except api_exc.NsxApiException as e: LOG.warn(_LW("Logical switch update on NSX backend failed. " "Neutron network id:%(net_id)s; " "NSX lswitch id:%(lswitch_id)s;" "Error:%(error)s"), {'net_id': id, 'lswitch_id': nsx_switch_ids[0], 'error': e}) return net def create_port(self, context, port): # If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED # then we pass the port to the policy engine. The reason why we don't # pass the value to the policy engine when the port is # ATTR_NOT_SPECIFIED is for the case where a port is created on a # shared network that is not owned by the tenant. port_data = port['port'] # Set port status as 'DOWN'. This will be updated by backend sync. 
port_data['status'] = constants.PORT_STATUS_DOWN with context.session.begin(subtransactions=True): # First we allocate port in neutron database neutron_db = super(NsxPluginV2, self).create_port(context, port) neutron_port_id = neutron_db['id'] # Update fields obtained from neutron db (eg: MAC address) port["port"].update(neutron_db) self.handle_port_metadata_access(context, neutron_db) # port security extension checks (port_security, has_ip) = self._determine_port_security_and_has_ip( context, port_data) port_data[psec.PORTSECURITY] = port_security self._process_port_port_security_create( context, port_data, neutron_db) # allowed address pair checks if attr.is_attr_set(port_data.get(addr_pair.ADDRESS_PAIRS)): if not port_security: raise addr_pair.AddressPairAndPortSecurityRequired() else: self._process_create_allowed_address_pairs( context, neutron_db, port_data[addr_pair.ADDRESS_PAIRS]) else: # remove ATTR_NOT_SPECIFIED port_data[addr_pair.ADDRESS_PAIRS] = [] # security group extension checks if port_security and has_ip: self._ensure_default_security_group_on_port(context, port) elif attr.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)): raise psec.PortSecurityAndIPRequiredForSecurityGroups() port_data[ext_sg.SECURITYGROUPS] = ( self._get_security_groups_on_port(context, port)) self._process_port_create_security_group( context, port_data, port_data[ext_sg.SECURITYGROUPS]) # QoS extension checks port_queue_id = self._check_for_queue_and_create( context, port_data) self._process_port_queue_mapping( context, port_data, port_queue_id) if (isinstance(port_data.get(mac_ext.MAC_LEARNING), bool)): self._create_mac_learning_state(context, port_data) elif mac_ext.MAC_LEARNING in port_data: port_data.pop(mac_ext.MAC_LEARNING) self._process_portbindings_create_and_update(context, port['port'], port_data) # For some reason the port bindings DB mixin does not handle # the VNIC_TYPE attribute, which is required by nova for # setting up VIFs. 
context.session.flush() port_data[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL # DB Operation is complete, perform NSX operation try: port_data = port['port'].copy() port_create_func = self._port_drivers['create'].get( port_data['device_owner'], self._port_drivers['create']['default']) port_create_func(context, port_data) LOG.debug("port created on NSX backend for tenant " "%(tenant_id)s: (%(id)s)", port_data) except n_exc.NotFound: LOG.warning(_LW("Logical switch for network %s was not " "found in NSX."), port_data['network_id']) # Put port in error on neutron DB with context.session.begin(subtransactions=True): port = self._get_port(context, neutron_port_id) port_data['status'] = constants.PORT_STATUS_ERROR port['status'] = port_data['status'] context.session.add(port) except Exception: # Port must be removed from neutron DB with excutils.save_and_reraise_exception(): LOG.error(_LE("Unable to create port or set port " "attachment in NSX.")) with context.session.begin(subtransactions=True): self._delete_port(context, neutron_port_id) self.handle_port_dhcp_access(context, port_data, action='create_port') return port_data def update_port(self, context, id, port): delete_security_groups = self._check_update_deletes_security_groups( port) has_security_groups = self._check_update_has_security_groups(port) delete_addr_pairs = self._check_update_deletes_allowed_address_pairs( port) has_addr_pairs = self._check_update_has_allowed_address_pairs(port) with context.session.begin(subtransactions=True): ret_port = super(NsxPluginV2, self).update_port( context, id, port) # Save current mac learning state to check whether it's # being updated or not old_mac_learning_state = ret_port.get(mac_ext.MAC_LEARNING) # copy values over - except fixed_ips as # they've already been processed port['port'].pop('fixed_ips', None) ret_port.update(port['port']) tenant_id = self._get_tenant_id_for_create(context, ret_port) # populate port_security setting if psec.PORTSECURITY not in port['port']: ret_port[psec.PORTSECURITY] = self._get_port_security_binding( context, id) has_ip = self._ip_on_port(ret_port) # validate port security and allowed address pairs if not ret_port[psec.PORTSECURITY]: # has address pairs in request if has_addr_pairs: raise addr_pair.AddressPairAndPortSecurityRequired() elif not delete_addr_pairs: # check if address pairs are in db ret_port[addr_pair.ADDRESS_PAIRS] = ( self.get_allowed_address_pairs(context, id)) if ret_port[addr_pair.ADDRESS_PAIRS]: raise addr_pair.AddressPairAndPortSecurityRequired() if (delete_addr_pairs or has_addr_pairs): # delete address pairs and read them in self._delete_allowed_address_pairs(context, id) self._process_create_allowed_address_pairs( context, ret_port, ret_port[addr_pair.ADDRESS_PAIRS]) # checks if security groups were updated adding/modifying # security groups, port security is set and port has ip if not (has_ip and ret_port[psec.PORTSECURITY]): if has_security_groups: raise psec.PortSecurityAndIPRequiredForSecurityGroups() # Update did not have security groups passed in. Check # that port does not have any security groups already on it. filters = {'port_id': [id]} security_groups = ( super(NsxPluginV2, self)._get_port_security_group_bindings( context, filters) ) if security_groups and not delete_security_groups: raise psec.PortSecurityPortHasSecurityGroup() if (delete_security_groups or has_security_groups): # delete the port binding and read it with the new rules. 
self._delete_port_security_group_bindings(context, id) sgids = self._get_security_groups_on_port(context, port) self._process_port_create_security_group(context, ret_port, sgids) if psec.PORTSECURITY in port['port']: self._process_port_port_security_update( context, port['port'], ret_port) port_queue_id = self._check_for_queue_and_create( context, ret_port) # Populate the mac learning attribute new_mac_learning_state = port['port'].get(mac_ext.MAC_LEARNING) if (new_mac_learning_state is not None and old_mac_learning_state != new_mac_learning_state): self._update_mac_learning_state(context, id, new_mac_learning_state) ret_port[mac_ext.MAC_LEARNING] = new_mac_learning_state self._delete_port_queue_mapping(context, ret_port['id']) self._process_port_queue_mapping(context, ret_port, port_queue_id) LOG.debug("Updating port: %s", port) nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( context.session, self.cluster, id) # Convert Neutron security groups identifiers into NSX security # profiles identifiers nsx_sec_profile_ids = [ nsx_utils.get_nsx_security_group_id( context.session, self.cluster, neutron_sg_id) for neutron_sg_id in (ret_port[ext_sg.SECURITYGROUPS] or [])] if nsx_port_id: try: switchlib.update_port( self.cluster, nsx_switch_id, nsx_port_id, id, tenant_id, ret_port['name'], ret_port['device_id'], ret_port['admin_state_up'], ret_port['mac_address'], ret_port['fixed_ips'], ret_port[psec.PORTSECURITY], nsx_sec_profile_ids, ret_port[qos.QUEUE], ret_port.get(mac_ext.MAC_LEARNING), ret_port.get(addr_pair.ADDRESS_PAIRS)) # Update the port status from nsx. If we fail here hide it # since the port was successfully updated but we were not # able to retrieve the status. ret_port['status'] = switchlib.get_port_status( self.cluster, nsx_switch_id, nsx_port_id) # FIXME(arosen) improve exception handling. except Exception: ret_port['status'] = constants.PORT_STATUS_ERROR LOG.exception(_LE("Unable to update port id: %s."), nsx_port_id) # If nsx_port_id is not in database or in nsx put in error state. else: ret_port['status'] = constants.PORT_STATUS_ERROR self._process_portbindings_create_and_update(context, port['port'], ret_port) return ret_port def delete_port(self, context, id, l3_port_check=True, nw_gw_port_check=True): """Deletes a port on a specified Virtual Network. If the port contains a remote interface attachment, the remote interface is first un-plugged and then the port is deleted. :returns: None :raises: exception.PortInUse :raises: exception.PortNotFound :raises: exception.NetworkNotFound """ # if needed, check to see if this is a port owned by # a l3 router. 
If so, we should prevent deletion here if l3_port_check: self.prevent_l3_port_deletion(context, id) neutron_db_port = self.get_port(context, id) # Perform the same check for ports owned by layer-2 gateways if nw_gw_port_check: self.prevent_network_gateway_port_deletion(context, neutron_db_port) port_delete_func = self._port_drivers['delete'].get( neutron_db_port['device_owner'], self._port_drivers['delete']['default']) port_delete_func(context, neutron_db_port) self.disassociate_floatingips(context, id) with context.session.begin(subtransactions=True): queue = self._get_port_queue_bindings(context, {'port_id': [id]}) # metadata_dhcp_host_route self.handle_port_metadata_access( context, neutron_db_port, is_delete=True) super(NsxPluginV2, self).delete_port(context, id) # Delete qos queue if possible if queue: self.delete_qos_queue(context, queue[0]['queue_id'], False) self.handle_port_dhcp_access( context, neutron_db_port, action='delete_port') def get_port(self, context, id, fields=None): with context.session.begin(subtransactions=True): if (self.nsx_sync_opts.always_read_status or fields and 'status' in fields): # Perform explicit state synchronization db_port = self._get_port(context, id) self._synchronizer.synchronize_port( context, db_port) return self._make_port_dict(db_port, fields) else: return super(NsxPluginV2, self).get_port(context, id, fields) def get_router(self, context, id, fields=None): if (self.nsx_sync_opts.always_read_status or fields and 'status' in fields): db_router = self._get_router(context, id) # Perform explicit state synchronization self._synchronizer.synchronize_router( context, db_router) return self._make_router_dict(db_router, fields) else: return super(NsxPluginV2, self).get_router(context, id, fields) def _create_lrouter(self, context, router, nexthop): tenant_id = self._get_tenant_id_for_create(context, router) distributed = router.get('distributed') try: lrouter = routerlib.create_lrouter( self.cluster, router['id'], tenant_id, router['name'], nexthop, distributed=attr.is_attr_set(distributed) and distributed) except nsx_exc.InvalidVersion: msg = _("Cannot create a distributed router with the NSX " "platform currently in execution. Please, try " "without specifying the 'distributed' attribute.") LOG.exception(msg) raise n_exc.BadRequest(resource='router', msg=msg) except api_exc.NsxApiException: err_msg = _("Unable to create logical router on NSX Platform") LOG.exception(err_msg) raise nsx_exc.NsxPluginException(err_msg=err_msg) # Create the port here - and update it later if we have gw_info try: self._create_and_attach_router_port( self.cluster, context, lrouter['uuid'], {'fake_ext_gw': True}, "L3GatewayAttachment", self.cluster.default_l3_gw_service_uuid) except nsx_exc.NsxPluginException: LOG.exception(_LE("Unable to create L3GW port on logical router " "%(router_uuid)s. 
Verify Default Layer-3 " "Gateway service %(def_l3_gw_svc)s id is " "correct"), {'router_uuid': lrouter['uuid'], 'def_l3_gw_svc': self.cluster.default_l3_gw_service_uuid}) # Try and remove logical router from NSX routerlib.delete_lrouter(self.cluster, lrouter['uuid']) # Return user a 500 with an apter message raise nsx_exc.NsxPluginException( err_msg=(_("Unable to create router %s on NSX backend") % router['id'])) lrouter['status'] = plugin_const.ACTIVE return lrouter def create_router(self, context, router): # NOTE(salvatore-orlando): We completely override this method in # order to be able to use the NSX ID as Neutron ID # TODO(salvatore-orlando): Propose upstream patch for allowing # 3rd parties to specify IDs as we do with l2 plugin r = router['router'] has_gw_info = False tenant_id = self._get_tenant_id_for_create(context, r) # default value to set - nsx wants it (even if we don't have it) nexthop = NSX_DEFAULT_NEXTHOP # if external gateway info are set, then configure nexthop to # default external gateway if 'external_gateway_info' in r and r.get('external_gateway_info'): has_gw_info = True gw_info = r['external_gateway_info'] del r['external_gateway_info'] # The following DB read will be performed again when updating # gateway info. This is not great, but still better than # creating NSX router here and updating it later network_id = (gw_info.get('network_id', None) if gw_info else None) if network_id: ext_net = self._get_network(context, network_id) if not ext_net.external: msg = (_("Network '%s' is not a valid external " "network") % network_id) raise n_exc.BadRequest(resource='router', msg=msg) if ext_net.subnets: ext_subnet = ext_net.subnets[0] nexthop = ext_subnet.gateway_ip # NOTE(salv-orlando): Pre-generating uuid for Neutron # router. This will be removed once the router create operation # becomes an asynchronous task neutron_router_id = str(uuid.uuid4()) r['id'] = neutron_router_id lrouter = self._create_lrouter(context, r, nexthop) # Update 'distributed' with value returned from NSX # This will be useful for setting the value if the API request # did not specify any value for the 'distributed' attribute # Platforms older than 3.x do not support the attribute r['distributed'] = lrouter.get('distributed', False) # TODO(salv-orlando): Deal with backend object removal in case # of db failures with context.session.begin(subtransactions=True): # Transaction nesting is needed to avoid foreign key violations # when processing the distributed router binding with context.session.begin(subtransactions=True): router_db = l3_db.Router(id=neutron_router_id, tenant_id=tenant_id, name=r['name'], admin_state_up=r['admin_state_up'], status=lrouter['status']) context.session.add(router_db) self._process_extra_attr_router_create(context, router_db, r) # Ensure neutron router is moved into the transaction's buffer context.session.flush() # Add mapping between neutron and nsx identifiers nsx_db.add_neutron_nsx_router_mapping( context.session, router_db['id'], lrouter['uuid']) if has_gw_info: # NOTE(salv-orlando): This operation has been moved out of the # database transaction since it performs several NSX queries, # ithis ncreasing the risk of deadlocks between eventlet and # sqlalchemy operations. 
# Set external gateway and remove router in case of failure try: self._update_router_gw_info(context, router_db['id'], gw_info) except (n_exc.NeutronException, api_exc.NsxApiException): with excutils.save_and_reraise_exception(): # As setting gateway failed, the router must be deleted # in order to ensure atomicity router_id = router_db['id'] LOG.warn(_LW("Failed to set gateway info for router being " "created:%s - removing router"), router_id) self.delete_router(context, router_id) LOG.info(_LI("Create router failed while setting external " "gateway. Router:%s has been removed from " "DB and backend"), router_id) return self._make_router_dict(router_db) def _update_lrouter(self, context, router_id, name, nexthop, routes=None): nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) return routerlib.update_lrouter( self.cluster, nsx_router_id, name, nexthop, routes=routes) def _update_lrouter_routes(self, context, router_id, routes): nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) routerlib.update_explicit_routes_lrouter( self.cluster, nsx_router_id, routes) def update_router(self, context, router_id, router): # Either nexthop is updated or should be kept as it was before r = router['router'] nexthop = None if 'external_gateway_info' in r and r.get('external_gateway_info'): gw_info = r['external_gateway_info'] # The following DB read will be performed again when updating # gateway info. This is not great, but still better than # creating NSX router here and updating it later network_id = (gw_info.get('network_id', None) if gw_info else None) if network_id: ext_net = self._get_network(context, network_id) if not ext_net.external: msg = (_("Network '%s' is not a valid external " "network") % network_id) raise n_exc.BadRequest(resource='router', msg=msg) if ext_net.subnets: ext_subnet = ext_net.subnets[0] nexthop = ext_subnet.gateway_ip try: for route in r.get('routes', []): if route['destination'] == '0.0.0.0/0': msg = _("'routes' cannot contain route '0.0.0.0/0', " "this must be updated through the default " "gateway attribute") raise n_exc.BadRequest(resource='router', msg=msg) previous_routes = self._update_lrouter( context, router_id, r.get('name'), nexthop, routes=r.get('routes')) # NOTE(salv-orlando): The exception handling below is not correct, but # unfortunately nsxlib raises a neutron notfound exception when an # object is not found in the underlying backend except n_exc.NotFound: # Put the router in ERROR status with context.session.begin(subtransactions=True): router_db = self._get_router(context, router_id) router_db['status'] = constants.NET_STATUS_ERROR raise nsx_exc.NsxPluginException( err_msg=_("Logical router %s not found " "on NSX Platform") % router_id) except api_exc.NsxApiException: raise nsx_exc.NsxPluginException( err_msg=_("Unable to update logical router on NSX Platform")) except nsx_exc.InvalidVersion: msg = _("Request cannot contain 'routes' with the NSX " "platform currently in execution. 
Please, try " "without specifying the static routes.") LOG.exception(msg) raise n_exc.BadRequest(resource='router', msg=msg) try: return super(NsxPluginV2, self).update_router(context, router_id, router) except (extraroute.InvalidRoutes, extraroute.RouterInterfaceInUseByRoute, extraroute.RoutesExhausted): with excutils.save_and_reraise_exception(): # revert changes made to NSX self._update_lrouter_routes( context, router_id, previous_routes) def _delete_lrouter(self, context, router_id, nsx_router_id): # The neutron router id (router_id) is ignored in this routine, # but used in plugins deriving from this one routerlib.delete_lrouter(self.cluster, nsx_router_id) def delete_router(self, context, router_id): with context.session.begin(subtransactions=True): # TODO(salv-orlando): This call should have no effect on delete # router, but if it does, it should not happen within a # transaction, and it should be restored on rollback self.handle_router_metadata_access( context, router_id, interface=None) # Pre-delete checks # NOTE(salv-orlando): These checks will be repeated anyway when # calling the superclass. This is wasteful, but is the simplest # way of ensuring a consistent removal of the router both in # the neutron Database and in the NSX backend. # TODO(salv-orlando): split pre-delete checks and actual # deletion in superclass. # Ensure that the router is not used fips = self.get_floatingips_count( context.elevated(), filters={'router_id': [router_id]}) if fips: raise l3.RouterInUse(router_id=router_id) device_filter = {'device_id': [router_id], 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]} ports = self._core_plugin.get_ports_count(context.elevated(), filters=device_filter) if ports: raise l3.RouterInUse(router_id=router_id) nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) # It is safe to remove the router from the database, so remove it # from the backend try: self._delete_lrouter(context, router_id, nsx_router_id) except n_exc.NotFound: # This is not a fatal error, but needs to be logged LOG.warning(_LW("Logical router '%s' not found " "on NSX Platform"), router_id) except api_exc.NsxApiException: raise nsx_exc.NsxPluginException( err_msg=(_("Unable to delete logical router '%s' " "on NSX Platform") % nsx_router_id)) # Remove the NSX mapping first in order to ensure a mapping to # a non-existent NSX router is not left in the DB in case of # failure while removing the router from the neutron DB try: nsx_db.delete_neutron_nsx_router_mapping( context.session, router_id) except db_exc.DBError as d_exc: # Do not make this error fatal LOG.warn(_LW("Unable to remove NSX mapping for Neutron router " "%(router_id)s because of the following exception:" "%(d_exc)s"), {'router_id': router_id, 'd_exc': str(d_exc)}) # Perform the actual delete on the Neutron DB super(NsxPluginV2, self).delete_router(context, router_id) def _add_subnet_snat_rule(self, context, router, subnet): gw_port = router.gw_port if gw_port and router.enable_snat: # There is a change gw_port might have multiple IPs # In that case we will consider only the first one if gw_port.get('fixed_ips'): snat_ip = gw_port['fixed_ips'][0]['ip_address'] cidr_prefix = int(subnet['cidr'].split('/')[1]) nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router['id']) routerlib.create_lrouter_snat_rule( self.cluster, nsx_router_id, snat_ip, snat_ip, order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix, match_criteria={'source_ip_addresses': subnet['cidr']}) def 
_delete_subnet_snat_rule(self, context, router, subnet): # Remove SNAT rule if external gateway is configured if router.gw_port: nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router['id']) routerlib.delete_nat_rules_by_match( self.cluster, nsx_router_id, "SourceNatRule", max_num_expected=1, min_num_expected=1, raise_on_len_mismatch=False, source_ip_addresses=subnet['cidr']) def add_router_interface(self, context, router_id, interface_info): # When adding interface by port_id we need to create the # peer port on the nsx logical router in this routine port_id = interface_info.get('port_id') router_iface_info = super(NsxPluginV2, self).add_router_interface( context, router_id, interface_info) # router_iface_info will always have a subnet_id attribute subnet_id = router_iface_info['subnet_id'] nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) if port_id: port_data = self.get_port(context, port_id) # If security groups are present we need to remove them as # this is a router port and disable port security. if port_data['security_groups']: self.update_port(context, port_id, {'port': {'security_groups': [], psec.PORTSECURITY: False}}) nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( context.session, self.cluster, port_id) # Unplug current attachment from lswitch port switchlib.plug_vif_interface(self.cluster, nsx_switch_id, nsx_port_id, "NoAttachment") # Create logical router port and plug patch attachment self._create_and_attach_router_port( self.cluster, context, nsx_router_id, port_data, "PatchAttachment", nsx_port_id, subnet_ids=[subnet_id]) subnet = self._get_subnet(context, subnet_id) # If there is an external gateway we need to configure the SNAT rule. # Fetch router from DB router = self._get_router(context, router_id) self._add_subnet_snat_rule(context, router, subnet) routerlib.create_lrouter_nosnat_rule( self.cluster, nsx_router_id, order=NSX_NOSNAT_RULES_ORDER, match_criteria={'destination_ip_addresses': subnet['cidr']}) # Ensure the NSX logical router has a connection to a 'metadata access' # network (with a proxy listening on its DHCP port), by creating it # if needed. 
self.handle_router_metadata_access( context, router_id, interface=router_iface_info) LOG.debug("Add_router_interface completed for subnet:%(subnet_id)s " "and router:%(router_id)s", {'subnet_id': subnet_id, 'router_id': router_id}) return router_iface_info def remove_router_interface(self, context, router_id, interface_info): # The code below is duplicated from base class, but comes handy # as we need to retrieve the router port id before removing the port subnet = None subnet_id = None if 'port_id' in interface_info: port_id = interface_info['port_id'] # find subnet_id - it is need for removing the SNAT rule port = self._get_port(context, port_id) if port.get('fixed_ips'): subnet_id = port['fixed_ips'][0]['subnet_id'] if not (port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF and port['device_id'] == router_id): raise l3.RouterInterfaceNotFound(router_id=router_id, port_id=port_id) elif 'subnet_id' in interface_info: subnet_id = interface_info['subnet_id'] subnet = self._get_subnet(context, subnet_id) rport_qry = context.session.query(models_v2.Port) ports = rport_qry.filter_by( device_id=router_id, device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF, network_id=subnet['network_id']) for p in ports: if p['fixed_ips'][0]['subnet_id'] == subnet_id: port_id = p['id'] break else: raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id, subnet_id=subnet_id) # Finally remove the data from the Neutron DB # This will also destroy the port on the logical switch info = super(NsxPluginV2, self).remove_router_interface( context, router_id, interface_info) try: # Ensure the connection to the 'metadata access network' # is removed (with the network) if this the last subnet # on the router self.handle_router_metadata_access( context, router_id, interface=info) if not subnet: subnet = self._get_subnet(context, subnet_id) router = self._get_router(context, router_id) # If router is enabled_snat = False there are no snat rules to # delete. 
if router.enable_snat: self._delete_subnet_snat_rule(context, router, subnet) # Relax the minimum expected number as the nosnat rules # do not exist in 2.x deployments nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) routerlib.delete_nat_rules_by_match( self.cluster, nsx_router_id, "NoSourceNatRule", max_num_expected=1, min_num_expected=0, raise_on_len_mismatch=False, destination_ip_addresses=subnet['cidr']) except n_exc.NotFound: LOG.error(_LE("Logical router resource %s not found " "on NSX platform"), router_id) except api_exc.NsxApiException: raise nsx_exc.NsxPluginException( err_msg=(_("Unable to update logical router" "on NSX Platform"))) return info def _retrieve_and_delete_nat_rules(self, context, floating_ip_address, internal_ip, nsx_router_id, min_num_rules_expected=0): """Finds and removes NAT rules from a NSX router.""" # NOTE(salv-orlando): The context parameter is ignored in this method # but used by derived classes try: # Remove DNAT rule for the floating IP routerlib.delete_nat_rules_by_match( self.cluster, nsx_router_id, "DestinationNatRule", max_num_expected=1, min_num_expected=min_num_rules_expected, destination_ip_addresses=floating_ip_address) # Remove SNAT rules for the floating IP routerlib.delete_nat_rules_by_match( self.cluster, nsx_router_id, "SourceNatRule", max_num_expected=1, min_num_expected=min_num_rules_expected, source_ip_addresses=internal_ip) routerlib.delete_nat_rules_by_match( self.cluster, nsx_router_id, "SourceNatRule", max_num_expected=1, min_num_expected=min_num_rules_expected, destination_ip_addresses=internal_ip) except api_exc.NsxApiException: with excutils.save_and_reraise_exception(): LOG.exception(_LE("An error occurred while removing NAT rules " "on the NSX platform for floating ip:%s"), floating_ip_address) except nsx_exc.NatRuleMismatch: # Do not surface to the user LOG.warning(_LW("An incorrect number of matching NAT rules " "was found on the NSX platform")) def _remove_floatingip_address(self, context, fip_db): # Remove floating IP address from logical router port # Fetch logical port of router's external gateway router_id = fip_db.router_id nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) nsx_gw_port_id = routerlib.find_router_gw_port( context, self.cluster, nsx_router_id)['uuid'] ext_neutron_port_db = self._get_port(context.elevated(), fip_db.floating_port_id) nsx_floating_ips = self._build_ip_address_list( context.elevated(), ext_neutron_port_db['fixed_ips']) routerlib.update_lrouter_port_ips(self.cluster, nsx_router_id, nsx_gw_port_id, ips_to_add=[], ips_to_remove=nsx_floating_ips) def _get_fip_assoc_data(self, context, fip, floatingip_db): if (('fixed_ip_address' in fip and fip['fixed_ip_address']) and not ('port_id' in fip and fip['port_id'])): msg = _("fixed_ip_address cannot be specified without a port_id") raise n_exc.BadRequest(resource='floatingip', msg=msg) port_id = internal_ip = router_id = None if 'port_id' in fip and fip['port_id']: fip_qry = context.session.query(l3_db.FloatingIP) port_id, internal_ip, router_id = self.get_assoc_data( context, fip, floatingip_db['floating_network_id']) try: fip_qry.filter_by( fixed_port_id=fip['port_id'], floating_network_id=floatingip_db['floating_network_id'], fixed_ip_address=internal_ip).one() raise l3.FloatingIPPortAlreadyAssociated( port_id=fip['port_id'], fip_id=floatingip_db['id'], floating_ip_address=floatingip_db['floating_ip_address'], fixed_ip=floatingip_db['fixed_ip_address'], 
net_id=floatingip_db['floating_network_id']) except sa_exc.NoResultFound: pass return (port_id, internal_ip, router_id) def _floatingip_status(self, floatingip_db, associated): if (associated and floatingip_db['status'] != constants.FLOATINGIP_STATUS_ACTIVE): return constants.FLOATINGIP_STATUS_ACTIVE elif (not associated and floatingip_db['status'] != constants.FLOATINGIP_STATUS_DOWN): return constants.FLOATINGIP_STATUS_DOWN # in any case ensure the status is not reset by this method! return floatingip_db['status'] def _update_fip_assoc(self, context, fip, floatingip_db, external_port): """Update floating IP association data. Overrides method from base class. The method is augmented for creating NAT rules in the process. """ # Store router currently serving the floating IP old_router_id = floatingip_db.router_id port_id, internal_ip, router_id = self._get_fip_assoc_data( context, fip, floatingip_db) floating_ip = floatingip_db['floating_ip_address'] # If there's no association router_id will be None if router_id: nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, router_id) self._retrieve_and_delete_nat_rules( context, floating_ip, internal_ip, nsx_router_id) # Fetch logical port of router's external gateway # Fetch logical port of router's external gateway nsx_floating_ips = self._build_ip_address_list( context.elevated(), external_port['fixed_ips']) floating_ip = floatingip_db['floating_ip_address'] # Retrieve and delete existing NAT rules, if any if old_router_id: nsx_old_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, old_router_id) # Retrieve the current internal ip _p, _s, old_internal_ip = self._internal_fip_assoc_data( context, {'id': floatingip_db.id, 'port_id': floatingip_db.fixed_port_id, 'fixed_ip_address': floatingip_db.fixed_ip_address, 'tenant_id': floatingip_db.tenant_id}) nsx_gw_port_id = routerlib.find_router_gw_port( context, self.cluster, nsx_old_router_id)['uuid'] self._retrieve_and_delete_nat_rules( context, floating_ip, old_internal_ip, nsx_old_router_id) routerlib.update_lrouter_port_ips( self.cluster, nsx_old_router_id, nsx_gw_port_id, ips_to_add=[], ips_to_remove=nsx_floating_ips) if router_id: nsx_gw_port_id = routerlib.find_router_gw_port( context, self.cluster, nsx_router_id)['uuid'] # Re-create NAT rules only if a port id is specified if fip.get('port_id'): try: # Setup DNAT rules for the floating IP routerlib.create_lrouter_dnat_rule( self.cluster, nsx_router_id, internal_ip, order=NSX_FLOATINGIP_NAT_RULES_ORDER, match_criteria={'destination_ip_addresses': floating_ip}) # Setup SNAT rules for the floating IP # Create a SNAT rule for enabling connectivity to the # floating IP from the same network as the internal port # Find subnet id for internal_ip from fixed_ips internal_port = self._get_port(context, port_id) # Cchecks not needed on statements below since otherwise # _internal_fip_assoc_data would have raised subnet_ids = [ip['subnet_id'] for ip in internal_port['fixed_ips'] if ip['ip_address'] == internal_ip] internal_subnet_cidr = self._build_ip_address_list( context, internal_port['fixed_ips'], subnet_ids=subnet_ids)[0] routerlib.create_lrouter_snat_rule( self.cluster, nsx_router_id, floating_ip, floating_ip, order=NSX_NOSNAT_RULES_ORDER - 1, match_criteria={'source_ip_addresses': internal_subnet_cidr, 'destination_ip_addresses': internal_ip}) # setup snat rule such that src ip of a IP packet when # using floating is the floating ip itself. 
routerlib.create_lrouter_snat_rule( self.cluster, nsx_router_id, floating_ip, floating_ip, order=NSX_FLOATINGIP_NAT_RULES_ORDER, match_criteria={'source_ip_addresses': internal_ip}) # Add Floating IP address to router_port routerlib.update_lrouter_port_ips( self.cluster, nsx_router_id, nsx_gw_port_id, ips_to_add=nsx_floating_ips, ips_to_remove=[]) except api_exc.NsxApiException: LOG.exception(_LE("An error occurred while creating NAT " "rules on the NSX platform for floating " "ip:%(floating_ip)s mapped to " "internal ip:%(internal_ip)s"), {'floating_ip': floating_ip, 'internal_ip': internal_ip}) msg = _("Failed to update NAT rules for floatingip update") raise nsx_exc.NsxPluginException(err_msg=msg) # Update also floating ip status (no need to call base class method) floatingip_db.update( {'fixed_ip_address': internal_ip, 'fixed_port_id': port_id, 'router_id': router_id, 'status': self._floatingip_status(floatingip_db, router_id)}) def delete_floatingip(self, context, id): fip_db = self._get_floatingip(context, id) # Check whether the floating ip is associated or not if fip_db.fixed_port_id: nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, fip_db.router_id) self._retrieve_and_delete_nat_rules(context, fip_db.floating_ip_address, fip_db.fixed_ip_address, nsx_router_id, min_num_rules_expected=1) # Remove floating IP address from logical router port self._remove_floatingip_address(context, fip_db) return super(NsxPluginV2, self).delete_floatingip(context, id) def disassociate_floatingips(self, context, port_id): try: fip_qry = context.session.query(l3_db.FloatingIP) fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) for fip_db in fip_dbs: nsx_router_id = nsx_utils.get_nsx_router_id( context.session, self.cluster, fip_db.router_id) self._retrieve_and_delete_nat_rules(context, fip_db.floating_ip_address, fip_db.fixed_ip_address, nsx_router_id, min_num_rules_expected=1) self._remove_floatingip_address(context, fip_db) except sa_exc.NoResultFound: LOG.debug("The port '%s' is not associated with floating IPs", port_id) except n_exc.NotFound: LOG.warning(_LW("Nat rules not found in nsx for port: %s"), id) # NOTE(ihrachys): L3 agent notifications don't make sense for # NSX VMWare plugin since there is no L3 agent in such setup, so # disabling them here. super(NsxPluginV2, self).disassociate_floatingips( context, port_id, do_notify=False) def create_network_gateway(self, context, network_gateway): """Create a layer-2 network gateway. Create the gateway service on NSX platform and corresponding data structures in Neutron datase. 
""" gw_data = network_gateway[networkgw.GATEWAY_RESOURCE_NAME] tenant_id = self._get_tenant_id_for_create(context, gw_data) # Ensure the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() # Validate provided gateway device list self._validate_device_list(context, tenant_id, gw_data) devices = gw_data['devices'] # Populate default physical network where not specified for device in devices: if not device.get('interface_name'): device['interface_name'] = self.cluster.default_interface_name try: # Replace Neutron device identifiers with NSX identifiers dev_map = dict((dev['id'], dev['interface_name']) for dev in devices) nsx_devices = [] for db_device in self._query_gateway_devices( context, filters={'id': [device['id'] for device in devices]}): nsx_devices.append( {'id': db_device['nsx_id'], 'interface_name': dev_map[db_device['id']]}) nsx_res = l2gwlib.create_l2_gw_service( self.cluster, tenant_id, gw_data['name'], nsx_devices) nsx_uuid = nsx_res.get('uuid') except api_exc.Conflict: raise nsx_exc.L2GatewayAlreadyInUse(gateway=gw_data['name']) except api_exc.NsxApiException: err_msg = _("Unable to create l2_gw_service for: %s") % gw_data LOG.exception(err_msg) raise nsx_exc.NsxPluginException(err_msg=err_msg) gw_data['id'] = nsx_uuid return super(NsxPluginV2, self).create_network_gateway( context, network_gateway, validate_device_list=False) def delete_network_gateway(self, context, gateway_id): """Remove a layer-2 network gateway. Remove the gateway service from NSX platform and corresponding data structures in Neutron datase. """ # Ensure the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() with context.session.begin(subtransactions=True): try: super(NsxPluginV2, self).delete_network_gateway( context, gateway_id) l2gwlib.delete_l2_gw_service(self.cluster, gateway_id) except api_exc.ResourceNotFound: # Do not cause a 500 to be returned to the user if # the corresponding NSX resource does not exist LOG.exception(_LE("Unable to remove gateway service from " "NSX plaform - the resource was not found")) def get_network_gateway(self, context, id, fields=None): # Ensure the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() return super(NsxPluginV2, self).get_network_gateway(context, id, fields) def get_network_gateways(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Ensure the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() # Ensure the tenant_id attribute is populated on returned gateways return super(NsxPluginV2, self).get_network_gateways( context, filters, fields, sorts, limit, marker, page_reverse) def update_network_gateway(self, context, id, network_gateway): # Ensure the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() # Update gateway on backend when there's a name change name = network_gateway[networkgw.GATEWAY_RESOURCE_NAME].get('name') if name: try: l2gwlib.update_l2_gw_service(self.cluster, id, name) except api_exc.NsxApiException: # Consider backend failures as non-fatal, but still warn # because this might indicate something dodgy is going on LOG.warn(_LW("Unable to update name on NSX backend " "for network gateway: %s"), id) return super(NsxPluginV2, self).update_network_gateway( context, id, network_gateway) def connect_network(self, context, network_gateway_id, network_mapping_info): # Ensure 
the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() try: return super(NsxPluginV2, self).connect_network( context, network_gateway_id, network_mapping_info) except api_exc.Conflict: raise nsx_exc.L2GatewayAlreadyInUse(gateway=network_gateway_id) def disconnect_network(self, context, network_gateway_id, network_mapping_info): # Ensure the default gateway in the config file is in sync with the db self._ensure_default_network_gateway() return super(NsxPluginV2, self).disconnect_network( context, network_gateway_id, network_mapping_info) def _get_nsx_device_id(self, context, device_id): return self._get_gateway_device(context, device_id)['nsx_id'] def _rollback_gw_device(self, context, device_id, gw_data=None, new_status=None, is_create=False): LOG.error(_LE("Rolling back database changes for gateway device %s " "because of an error in the NSX backend"), device_id) with context.session.begin(subtransactions=True): query = self._model_query( context, networkgw_db.NetworkGatewayDevice).filter( networkgw_db.NetworkGatewayDevice.id == device_id) if is_create: query.delete(synchronize_session=False) else: super(NsxPluginV2, self).update_gateway_device( context, device_id, {networkgw.DEVICE_RESOURCE_NAME: gw_data}) if new_status: query.update({'status': new_status}, synchronize_session=False) # TODO(salv-orlando): Handlers for Gateway device operations should be # moved into the appropriate nsx_handlers package once the code for the # blueprint nsx-async-backend-communication merges def create_gateway_device_handler(self, context, gateway_device, client_certificate): neutron_id = gateway_device['id'] try: nsx_res = l2gwlib.create_gateway_device( self.cluster, gateway_device['tenant_id'], gateway_device['name'], neutron_id, self.cluster.default_tz_uuid, gateway_device['connector_type'], gateway_device['connector_ip'], client_certificate) # Fetch status (it needs another NSX API call) device_status = nsx_utils.get_nsx_device_status(self.cluster, nsx_res['uuid']) # set NSX GW device in neutron database and update status with context.session.begin(subtransactions=True): query = self._model_query( context, networkgw_db.NetworkGatewayDevice).filter( networkgw_db.NetworkGatewayDevice.id == neutron_id) query.update({'status': device_status, 'nsx_id': nsx_res['uuid']}, synchronize_session=False) LOG.debug("Neutron gateway device: %(neutron_id)s; " "NSX transport node identifier: %(nsx_id)s; " "Operational status: %(status)s.", {'neutron_id': neutron_id, 'nsx_id': nsx_res['uuid'], 'status': device_status}) return device_status except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException): with excutils.save_and_reraise_exception(): self._rollback_gw_device(context, neutron_id, is_create=True) def update_gateway_device_handler(self, context, gateway_device, old_gateway_device_data, client_certificate): nsx_id = gateway_device['nsx_id'] neutron_id = gateway_device['id'] try: l2gwlib.update_gateway_device( self.cluster, nsx_id, gateway_device['tenant_id'], gateway_device['name'], neutron_id, self.cluster.default_tz_uuid, gateway_device['connector_type'], gateway_device['connector_ip'], client_certificate) # Fetch status (it needs another NSX API call) device_status = nsx_utils.get_nsx_device_status(self.cluster, nsx_id) # update status with context.session.begin(subtransactions=True): query = self._model_query( context, networkgw_db.NetworkGatewayDevice).filter( networkgw_db.NetworkGatewayDevice.id == neutron_id) query.update({'status': device_status}, 
synchronize_session=False) LOG.debug("Neutron gateway device: %(neutron_id)s; " "NSX transport node identifier: %(nsx_id)s; " "Operational status: %(status)s.", {'neutron_id': neutron_id, 'nsx_id': nsx_id, 'status': device_status}) return device_status except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException): with excutils.save_and_reraise_exception(): self._rollback_gw_device(context, neutron_id, gw_data=old_gateway_device_data) except n_exc.NotFound: # The gateway device was probably deleted in the backend. # The DB change should be rolled back and the status must # be put in error with excutils.save_and_reraise_exception(): self._rollback_gw_device(context, neutron_id, gw_data=old_gateway_device_data, new_status=networkgw_db.ERROR) def get_gateway_device(self, context, device_id, fields=None): # Get device from database gw_device = super(NsxPluginV2, self).get_gateway_device( context, device_id, fields, include_nsx_id=True) # Fetch status from NSX nsx_id = gw_device['nsx_id'] device_status = nsx_utils.get_nsx_device_status(self.cluster, nsx_id) # TODO(salv-orlando): Asynchronous sync for gateway device status # Update status in database with context.session.begin(subtransactions=True): query = self._model_query( context, networkgw_db.NetworkGatewayDevice).filter( networkgw_db.NetworkGatewayDevice.id == device_id) query.update({'status': device_status}, synchronize_session=False) gw_device['status'] = device_status return gw_device def get_gateway_devices(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Get devices from database devices = super(NsxPluginV2, self).get_gateway_devices( context, filters, fields, include_nsx_id=True) # Fetch operational status from NSX, filter by tenant tag # TODO(salv-orlando): Asynchronous sync for gateway device status tenant_id = context.tenant_id if not context.is_admin else None nsx_statuses = nsx_utils.get_nsx_device_statuses(self.cluster, tenant_id) # Update statuses in database with context.session.begin(subtransactions=True): for device in devices: new_status = nsx_statuses.get(device['nsx_id']) if new_status: device['status'] = new_status return devices def create_gateway_device(self, context, gateway_device): # NOTE(salv-orlando): client-certificate will not be stored # in the database device_data = gateway_device[networkgw.DEVICE_RESOURCE_NAME] client_certificate = device_data.pop('client_certificate') gw_device = super(NsxPluginV2, self).create_gateway_device( context, gateway_device) # DB operation was successful, perform NSX operation gw_device['status'] = self.create_gateway_device_handler( context, gw_device, client_certificate) return gw_device def update_gateway_device(self, context, device_id, gateway_device): # NOTE(salv-orlando): client-certificate will not be stored # in the database client_certificate = ( gateway_device[networkgw.DEVICE_RESOURCE_NAME].pop( 'client_certificate', None)) # Retrive current state from DB in case a rollback should be needed old_gw_device_data = super(NsxPluginV2, self).get_gateway_device( context, device_id, include_nsx_id=True) gw_device = super(NsxPluginV2, self).update_gateway_device( context, device_id, gateway_device, include_nsx_id=True) # DB operation was successful, perform NSX operation gw_device['status'] = self.update_gateway_device_handler( context, gw_device, old_gw_device_data, client_certificate) gw_device.pop('nsx_id') return gw_device def delete_gateway_device(self, context, device_id): nsx_device_id = 
self._get_nsx_device_id(context, device_id) super(NsxPluginV2, self).delete_gateway_device( context, device_id) # DB operation was successful, perform NSX operation # TODO(salv-orlando): State consistency with neutron DB # should be ensured even in case of backend failures try: l2gwlib.delete_gateway_device(self.cluster, nsx_device_id) except n_exc.NotFound: LOG.warn(_LW("Removal of gateway device: %(neutron_id)s failed on " "NSX backend (NSX id:%(nsx_id)s) because the NSX " "resource was not found"), {'neutron_id': device_id, 'nsx_id': nsx_device_id}) except api_exc.NsxApiException: with excutils.save_and_reraise_exception(): # In this case a 500 should be returned LOG.exception(_LE("Removal of gateway device: %(neutron_id)s " "failed on NSX backend (NSX id:%(nsx_id)s). " "Neutron and NSX states have diverged."), {'neutron_id': device_id, 'nsx_id': nsx_device_id}) def create_security_group(self, context, security_group, default_sg=False): """Create security group. If default_sg is true that means we are creating a default security group and we don't need to check if one exists. """ s = security_group.get('security_group') tenant_id = self._get_tenant_id_for_create(context, s) if not default_sg: self._ensure_default_security_group(context, tenant_id) # NOTE(salv-orlando): Pre-generating Neutron ID for security group. neutron_id = str(uuid.uuid4()) nsx_secgroup = secgrouplib.create_security_profile( self.cluster, tenant_id, neutron_id, s) with context.session.begin(subtransactions=True): s['id'] = neutron_id sec_group = super(NsxPluginV2, self).create_security_group( context, security_group, default_sg) context.session.flush() # Add mapping between neutron and nsx identifiers nsx_db.add_neutron_nsx_security_group_mapping( context.session, neutron_id, nsx_secgroup['uuid']) return sec_group def update_security_group(self, context, secgroup_id, security_group): secgroup = (super(NsxPluginV2, self). update_security_group(context, secgroup_id, security_group)) if ('name' in security_group['security_group'] and secgroup['name'] != 'default'): nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( context.session, self.cluster, secgroup_id) try: name = security_group['security_group']['name'] secgrouplib.update_security_profile( self.cluster, nsx_sec_profile_id, name) except (n_exc.NotFound, api_exc.NsxApiException) as e: # Reverting the DB change is not really worthwhile # for a mismatch between names. It's the rules that # we care about. LOG.error(_LE('Error while updating security profile ' '%(uuid)s with name %(name)s: %(error)s.'), {'uuid': secgroup_id, 'name': name, 'error': e}) return secgroup def delete_security_group(self, context, security_group_id): """Delete a security group. :param security_group_id: security group rule to remove. 
""" with context.session.begin(subtransactions=True): security_group = super(NsxPluginV2, self).get_security_group( context, security_group_id) if not security_group: raise ext_sg.SecurityGroupNotFound(id=security_group_id) if security_group['name'] == 'default' and not context.is_admin: raise ext_sg.SecurityGroupCannotRemoveDefault() filters = {'security_group_id': [security_group['id']]} if super(NsxPluginV2, self)._get_port_security_group_bindings( context, filters): raise ext_sg.SecurityGroupInUse(id=security_group['id']) nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( context.session, self.cluster, security_group_id) try: secgrouplib.delete_security_profile( self.cluster, nsx_sec_profile_id) except n_exc.NotFound: # The security profile was not found on the backend # do not fail in this case. LOG.warning(_LW("The NSX security profile %(sec_profile_id)s, " "associated with the Neutron security group " "%(sec_group_id)s was not found on the " "backend"), {'sec_profile_id': nsx_sec_profile_id, 'sec_group_id': security_group_id}) except api_exc.NsxApiException: # Raise and fail the operation, as there is a problem which # prevented the sec group from being removed from the backend LOG.exception(_LE("An exception occurred while removing the " "NSX security profile %(sec_profile_id)s, " "associated with Netron security group " "%(sec_group_id)s"), {'sec_profile_id': nsx_sec_profile_id, 'sec_group_id': security_group_id}) raise nsx_exc.NsxPluginException( _("Unable to remove security group %s from backend"), security_group['id']) return super(NsxPluginV2, self).delete_security_group( context, security_group_id) def _validate_security_group_rules(self, context, rules): for rule in rules['security_group_rules']: r = rule.get('security_group_rule') port_based_proto = (self._get_ip_proto_number(r['protocol']) in securitygroups_db.IP_PROTOCOL_MAP.values()) if (not port_based_proto and (r['port_range_min'] is not None or r['port_range_max'] is not None)): msg = (_("Port values not valid for " "protocol: %s") % r['protocol']) raise n_exc.BadRequest(resource='security_group_rule', msg=msg) return super(NsxPluginV2, self)._validate_security_group_rules(context, rules) def create_security_group_rule(self, context, security_group_rule): """Create a single security group rule.""" bulk_rule = {'security_group_rules': [security_group_rule]} return self.create_security_group_rule_bulk(context, bulk_rule)[0] def create_security_group_rule_bulk(self, context, security_group_rule): """Create security group rules. :param security_group_rule: list of rules to create """ s = security_group_rule.get('security_group_rules') # TODO(arosen) is there anyway we could avoid having the update of # the security group rules in nsx outside of this transaction? with context.session.begin(subtransactions=True): security_group_id = self._validate_security_group_rules( context, security_group_rule) # Check to make sure security group exists security_group = super(NsxPluginV2, self).get_security_group( context, security_group_id) if not security_group: raise ext_sg.SecurityGroupNotFound(id=security_group_id) # Check for duplicate rules self._check_for_duplicate_rules(context, s) # gather all the existing security group rules since we need all # of them to PUT to NSX. 
existing_rules = self.get_security_group_rules( context, {'security_group_id': [security_group['id']]}) combined_rules = sg_utils.merge_security_group_rules_with_current( context.session, self.cluster, s, existing_rules) nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( context.session, self.cluster, security_group_id) secgrouplib.update_security_group_rules(self.cluster, nsx_sec_profile_id, combined_rules) return super( NsxPluginV2, self).create_security_group_rule_bulk_native( context, security_group_rule) def delete_security_group_rule(self, context, sgrid): """Delete a security group rule :param sgrid: security group id to remove. """ with context.session.begin(subtransactions=True): # determine security profile id security_group_rule = ( super(NsxPluginV2, self).get_security_group_rule( context, sgrid)) if not security_group_rule: raise ext_sg.SecurityGroupRuleNotFound(id=sgrid) sgid = security_group_rule['security_group_id'] current_rules = self.get_security_group_rules( context, {'security_group_id': [sgid]}) current_rules_nsx = sg_utils.get_security_group_rules_nsx_format( context.session, self.cluster, current_rules, True) sg_utils.remove_security_group_with_id_and_id_field( current_rules_nsx, sgrid) nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id( context.session, self.cluster, sgid) secgrouplib.update_security_group_rules( self.cluster, nsx_sec_profile_id, current_rules_nsx) return super(NsxPluginV2, self).delete_security_group_rule(context, sgrid) def create_qos_queue(self, context, qos_queue, check_policy=True): q = qos_queue.get('qos_queue') self._validate_qos_queue(context, q) q['id'] = queuelib.create_lqueue(self.cluster, q) return super(NsxPluginV2, self).create_qos_queue(context, qos_queue) def delete_qos_queue(self, context, queue_id, raise_in_use=True): filters = {'queue_id': [queue_id]} queues = self._get_port_queue_bindings(context, filters) if queues: if raise_in_use: raise qos.QueueInUseByPort() else: return queuelib.delete_lqueue(self.cluster, queue_id) return super(NsxPluginV2, self).delete_qos_queue(context, queue_id)
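A pattern worth making explicit in the plugin code above: the Neutron database and the NSX backend are updated in separate steps, and when the backend step fails the database change is rolled back or the record is flagged with an ERROR status so the two stores do not diverge (see create_port, create_router and the gateway device handlers). The snippet below is a minimal, self-contained sketch of that rollback pattern; FakeDB, FakeBackend and create_widget are hypothetical stand-ins for the Neutron session and the nsxlib client, not real plugin symbols.

# Minimal sketch (hypothetical names): persist in the DB first, mirror on the
# backend second, and roll the DB change back if the backend call fails.
import uuid


class FakeDB(object):
    """Stand-in for the Neutron DB session (hypothetical)."""

    def __init__(self):
        self.rows = {}

    def add(self, obj_id, data):
        self.rows[obj_id] = data

    def delete(self, obj_id):
        self.rows.pop(obj_id, None)


class FakeBackend(object):
    """Stand-in for the NSX API client (hypothetical)."""

    def create(self, obj_id, data):
        if data.get('fail'):
            raise RuntimeError("backend error")
        return {'uuid': 'nsx-' + obj_id}


def create_widget(db, backend, data):
    # Phase 1: pre-generate the Neutron identifier and persist the row.
    obj_id = str(uuid.uuid4())
    db.add(obj_id, dict(data))
    try:
        # Phase 2: mirror the object on the backend and store the mapping,
        # similar in spirit to the add_neutron_nsx_*_mapping calls above.
        nsx_res = backend.create(obj_id, data)
        db.rows[obj_id]['nsx_id'] = nsx_res['uuid']
    except Exception:
        # Backend call failed: undo the DB row so both stores stay consistent.
        db.delete(obj_id)
        raise
    return obj_id


if __name__ == '__main__':
    db, backend = FakeDB(), FakeBackend()
    widget_id = create_widget(db, backend, {'name': 'demo'})
    assert db.rows[widget_id]['nsx_id'].startswith('nsx-')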
Life in Text is a short film by Los Angeles filmmaker Laurence Jacobs that tells the story of a relationship from its bitter end back to its tender beginning through text messages. It stars Tyler Ritter and Alexandra Daddario. Nostalgia sets in for a 20-something guy (Ritter) as he travels through a virtual text-message world and revisits old exchanges with his ex-girlfriend (Daddario). The digital drama begins at the end of the couple’s history, when they are no longer speaking, and ends at the beginning of their relationship: their first text message.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# $Id$

import os, sys, re
import time, math
import wave
import subprocess
import multiprocessing


class Decode:
    """ Decoder """

    def __init__(self, filename):
        self.filename = filename
        self.origin_filename = None
        self.fileext = {
            '.tta': self.tta,
            '.flac': self.flac,
            '.ape': self.ape,
            '.wv': self.wv,
        }
        if not os.path.exists(filename):
            # try to find an encoded file and decode it to wave format
            for extension, dec_func in self.fileext.iteritems():
                filename = os.path.splitext(filename)[0] + extension
                if os.path.exists(filename):
                    print 'Decode:', filename
                    self.origin_filename = filename
                    dec_func()
                    break

    def __del__(self):
        if self.origin_filename:
            os.remove(self.filename)

    def ape(self):
        subprocess.call(['mac', self.origin_filename, self.filename, '-d', ])

    def flac(self):
        subprocess.call(['flac', '-d', self.origin_filename, ])

    def tta(self):
        subprocess.call(['ttaenc', '-d', self.origin_filename, '.', ])

    def wv(self):
        subprocess.call(['wvunpack', self.origin_filename, '.', ])


class Track:
    def __init__(self, track_index, file, parent):
        # from parent
        for member in ('cd_performer', 'cd_title', 'cd_date', 'cd_genre'):
            setattr(self, member, getattr(parent, member))
        self.file = file
        self.title = ''
        self.index = track_index
        self.performer = self.cd_performer
        self.time = {1: 0.0}

    def __str__(self):
        return "{} - {} - {}".format(self.index, self.title, self.time)


class CueSheet:
    def __init__(self, cue_sheet):
        self.sheet = cue_sheet
        self.cd_performer = ''
        self.cd_title = ''
        self.cd_genre = ''
        self.cd_date = ''
        self.current_file = ''
        self.tracks = []
        self.regex_lst = (
            (re.compile(r'PERFORMER\s(.+)'), self.__performer),
            (re.compile(r'REM DATE\s(.+)'), self.__date),
            (re.compile(r'REM GENRE\s(.+)'), self.__genre),
            (re.compile(r'TITLE\s(.+)'), self.__title),
            (re.compile(r'FILE\s(.+)\sWAVE'), self.__file),        # only wave
            (re.compile(r'TRACK\s(\d{2})\sAUDIO'), self.__track),  # only audio
            (re.compile(r'INDEX\s(\d{2})\s(\d{1,3}:\d{2}:\d{2})'), self.__index),
        )

    def __performer(self, s):
        if not self.tracks:
            self.cd_performer = s
        else:
            self.tracks[-1].performer = s

    def __title(self, s):
        if not self.tracks:
            self.cd_title = s
        else:
            self.tracks[-1].title = s

    def __genre(self, s):
        self.cd_genre = s

    def __date(self, s):
        self.cd_date = s

    def __file(self, s):
        self.current_file = s

    def __track(self, s):
        self.tracks.append( Track(s, self.current_file, self) )

    @staticmethod
    def index_split(s):
        t = s.split(':')
        return (int(t[0])*60 + int(t[1]))*75 + int(t[2])

    @staticmethod
    def dqstrip(s):
        if s[0] == '"' and s[-1] == '"':
            return s[1:-1]
        return s

    @staticmethod
    def unquote(t):
        return tuple([CueSheet.dqstrip(s.strip()) for s in t])

    def __index(self, idx, s):
        idx = int(idx)
        self.tracks[-1].time[idx] = self.index_split(s)

    def read(self):
        for line in open(self.sheet):
            for regex, handler in self.regex_lst:
                mobj = regex.match(line.strip())
                if mobj:
                    #~ print mobj.group(1)
                    handler(*self.unquote(mobj.groups()))
        #~ for x in self.tracks: print x

    def split(self, encoders=None):
        encoding_queue = multiprocessing.Queue(multiprocessing.cpu_count())
        keep_alive = []  # a dummy object
        for i, track in enumerate(self.tracks):
            keep_alive.append( Decode(track.file) )
            wafi = wave.open(track.file, 'rb')
            param_names = ('nchannels', 'sampwidth', 'framerate',
                           'nframes', 'comptype', 'compname')
            params = wafi.getparams()
            param_dict = dict(zip(param_names, params))
            #~ print param_dict['framerate']
            # calculate number of frames
            start = param_dict['framerate'] * track.time[1] // 75
            stop = param_dict['nframes']
            if len(self.tracks) > i+1 and self.tracks[i+1].file == track.file:
                stop = int(param_dict['framerate'] *
                           self.tracks[i+1].time.get(0, self.tracks[i+1].time[1])) // 75
            trackfilename = ' - '.join((track.index, track.title)) + '.wav'
            trackfilename = trackfilename.replace('?', '')
            trackfilename = trackfilename.replace('/', '')
            trackfilename = trackfilename.replace('\\', '')
            trackfilename = trackfilename.replace(':', '')
            if not os.path.exists(trackfilename):
                wafi_write = wave.open(trackfilename, 'wb')
                newparams = list(params)
                newparams[3] = 0
                wafi_write.setparams( tuple(newparams) )
                wafi.setpos(start)
                wafi_write.writeframes(wafi.readframes(stop-start))
                wafi_write.close()
            wafi.close()
            # ogg encode it, queue is used for sync
            for encode_to in encoders:
                encoding_queue.put(trackfilename)
                p = multiprocessing.Process(target=encode_to,
                                            args=(encoding_queue, trackfilename, track))
                p.start()
        # wait until all task are finished
        while not encoding_queue.empty():
            time.sleep(1.0)
        keep_alive = None

    def __str__(self):
        output = 'REM COMMENT CUE JOIN\n'
        if self.cd_genre:
            output += 'REM GENRE "{}"\n'.format(self.cd_genre)
        if self.cd_date:
            output += 'REM DATE {}\n'.format(self.cd_date)
        if self.cd_performer:
            output += 'PERFORMER "{}"\n'.format(self.cd_performer)
        if self.cd_title:
            output += 'TITLE "{}"\n'.format(self.cd_title)
        one_file = self.tracks[0].file == self.tracks[-1].file
        if one_file:
            output += u'FILE "{}" WAVE\n'.format(self.current_file).encode('latin-1')
        for i, track in enumerate(self.tracks):
            output += ' TRACK {:02d} AUDIO\n'.format(i+1)
            output += ' TITLE "{}"\n'.format(track.title)
            if self.cd_performer != track.performer:
                output += ' PERFORMER "{}"\n'.format(track.performer)
            if not one_file:
                output += ' FILE "{}" WAVE\n'.format(track.file)
            for idx in sorted(track.time.keys()):
                t = track.time[idx]
                #~ print t
                mins = t // (60*75)
                t -= mins * (60*75)
                sec = t // 75
                t -= sec * 75
                rest = t
                output += ' INDEX {:02d} {:02d}:{:02d}:{:02d}\n'.format(
                    idx, int(mins), int(sec), rest)
        return output

    def __analyze_wave(self, trackfile):
        wafi = wave.open(trackfile, 'rb')
        param_names = ('nchannels', 'sampwidth', 'framerate',
                       'nframes', 'comptype', 'compname')
        params = wafi.getparams()
        param_dict = dict(zip(param_names, params))
        wafi.close()
        return param_dict, params

    def join(self, cue_obj, wave_filename=u'join'):
        self.current_file = wave_filename + u'.wav'
        wafo = wave.open(self.current_file, 'wb')
        set_params = True
        for i, track in enumerate(self.tracks):
            Decode(track.file)
            if set_params:
                set_params = False
                pdict, param = self.__analyze_wave(track.file)
                #~ print pdict['nframes'] / (pdict['framerate'] // 75)
                wafo.setparams(param)
            wafi = wave.open(track.file, 'rb')
            pdict, param = self.__analyze_wave(track.file)
            # calculate number of frames
            start = pdict['framerate'] * track.time.get(0, track.time[1]) // 75
            stop = pdict['nframes']
            if len(self.tracks) > i+1 and self.tracks[i+1].file == track.file:
                stop = pdict['framerate'] * self.tracks[i+1].time.get(0, self.tracks[i+1].time[1]) // 75
            print start, stop, pdict['nframes']
            wafi.setpos(start)
            wafo.writeframes(wafi.readframes(stop-start))
            wafi.close()
            track.file = self.current_file

        # second part
        time_offset = pdict['nframes']*75 // pdict['framerate']
        for i, track in enumerate(cue_obj.tracks):
            Decode(track.file)
            wafi = wave.open(track.file, 'rb')
            pdict, param = self.__analyze_wave(track.file)
            # calculate number of frames
            start = pdict['framerate'] * track.time.get(0, track.time[1]) // 75
            stop = pdict['nframes']
            if len(cue_obj.tracks) > i+1 and cue_obj.tracks[i+1].file == track.file:
                stop = pdict['framerate'] * cue_obj.tracks[i+1].time.get(0, cue_obj.tracks[i+1].time[1]) // 75
            print start, stop, pdict['nframes']
            wafi.setpos(start)
            wafo.writeframes(wafi.readframes(stop-start))
            wafi.close()
            track.file = self.current_file
            for key, value in cue_obj.tracks[i].time.iteritems():
                cue_obj.tracks[i].time[key] = value + time_offset
        self.tracks += cue_obj.tracks

        with open(wave_filename+u'.cue', 'w') as f:
            f.write( str(self) )


if __name__ == "__main__":
    cue_files = []
    for filename in os.listdir(u'.'):
        if os.path.isfile(filename) and filename.lower().endswith(u'.cue'):
            cue_files.append(filename)

    cue_objs = []
    joined_filename = None
    for f in sorted(cue_files):
        if not joined_filename:
            joined_filename = f
        else:
            for i, c in enumerate(f):
                if joined_filename[i] != c:
                    joined_filename = joined_filename[:i]
                    break
        cue = CueSheet(f)
        cue.read()
        cue_objs.append(cue)

    joined_filename = joined_filename.rstrip(u'CD').rstrip()
    #~ print joined_filename
    x = cue_objs[0].join(cue_objs[1], joined_filename)
By the time Sam Rockwell shows up in Loitering with Intent, if you're not vicariously stressed out like our lead writer, then you will be. He is one of the many characters – indeed, characters in every sense of the word – who find their way to a country home where two best friends and colleagues have holed up to write a screenplay set to make their careers. Dominic (Michael Godere) is perhaps the more hopeful of the pair, while his bartending friend Raphael (Ivan Martin), at 40, seems to have given up on being a Hollywood success story. Nevertheless, a chance meeting gives them 10 days to turn their detective story into a script with a shot at becoming a movie, and stealing away for physical, mental, and emotional space is sure to help. This comedic tale tends more towards the farcical than the sincere as a trove of friends and strangers descends upon the retreat by accident or fate. Physical proximity may be the only thing these caricatures share, because everyone is in their own absurd world with a warped, monomaniacal vision of what exists around them. There's a surfer, a coquette, a feuding couple, and crises all around; everyone feels a singular change will better their life – and everyone is jealous of the next. At least the men are. The screenplay begins to take a backseat, though, as alcohol mixes with feelings and inhibitions run wild amid fear of the unpredictable future and uncertain present. The developments are neither surprising nor novel, but genuine nonetheless, and worth investing in thanks to the likeable characters, even if they are more thinly drawn than layered. Rockwell stands out, as he always does, playing this role of loveable gnat, of indefatigable man-child (see Laggies, The Way, Way Back). Marisa Tomei is Gigi, another wayward soul teetering on the line between endless bliss and losing complete control; she and Rockwell's character make a fun couple worthy of a spinoff. The title of this fare, directed by Adam Rapp and written by our two male leads, is fitting, though almost in a negative way. That is, nothing much happens save for fleeting outpourings of emotion and romance, and drunken, defenseless conversations that may or may not touch on something meaningful. Charming, entertaining, and fairly empty: the 'loitering' is there for sure, but it's lacking in 'intent'.
# Copyright (c) 2012, Johan Rydberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""Example that builds a ring of actors and then send a message
through the ring.
"""

from guild import actor
import gevent


def forward(receive, address):
    pat, data = receive()
    address | data


def build(receive, n):
    ring = []
    for i in range(n):
        if not ring:
            node = actor.spawn(forward, actor.curaddr())
        else:
            node = actor.spawn(forward, ring[-1])
        ring.append(node)
        gevent.sleep()

    ring[-1] | {'text': 'hello around the ring'}
    pat, data = receive()
    return data


mesh = actor.Mesh()
node = actor.Node(mesh, '[email protected]:3232')
addr = node.spawn(build, 10000)
print node.wait(addr)
The parents of a bullied 10-year-old boy who is brain dead and on life support after trying to take his own life have told of their grief. Landen Lewis tried to take his own life in his bedroom after returning home from school in Nevada, Missouri, last Friday, the Daily Mail reports. He gave no warning to his mother Britany and stepfather Jon about what he was planning and was even "giddy", they said. Now, he is brain dead in the hospital. His parents are about to turn off his life support and say they will donate his organs to other children, something he'd expressed interest in in the past. Since his suicide attempt, the family says they have been told by his friends that he was being "picked on" by teachers. It is unclear how long after he tried to take his own life his parents found him. Describing finding him, Britany said: "I was like no. It's not real. It's not real. All I could think was I've got to get him to breathe. "I've got to get him to breathe." Bryant Elementary School, where he was a student, has also been in touch to tell her that he had been in trouble repeatedly and was removed from class several times but teachers would not tell her why, she said. "I didn't even know he'd been in trouble three times until the principal had called me and told me that she had pulled him out of class three times and wouldn't tell me why," his mother told Fox 4. The boy's mother and stepfather have said repeatedly that he was bullied, though it is not clear if they mean he was victimized by school staff or by other children. "My beautiful intelligent baby boy was an old soul, he would help anyone who needed help and would give you the shirt off his back. "He wanted to take everyone's pain away and he wanted no one to feel alone. "My baby was someone who could carry a conversation with you about the universe. "What happened to my child could have been prevented had everyone just been nice to him once or twice," Britany said in a Facebook post on Monday. She went on to say that while she did her best, the school "failed" him. "As parents we trust a school to take our child for 8 hrs a day in these 8 hrs they are suppose to expand our child's mind and help them grow as little humans. "The school failed my son and I pray no other child has to go through what my little precious boy has. "Bullying is not okay no matter what age, and grown people should never bully a child. "Love your babies hold them close and whisper a prayer that no child ever go through this," she said. A spokesman for Bryant Elementary School did not immediately respond to DailyMail.com's inquiries on Wednesday morning. They previously declined to go into details of the boy's death, telling Fox 4 they could not discuss them because they were limited by privacy laws. According to his mother, Landen had once told that her that he wanted to donate his organs to other children if he ever died. "His step dad made a joke that went like, 'I plan to use all of mine but they are welcome to whats left' when he put the organ donor on his driver's license and so Landen asked about it and after explaining it all to him, he said that if anything ever happened to him that he would want to donate to," she told DailyMail.com on Wednesday.
#!/usr/bin/env python3

"""This module contains code that interacts with the Telemachus mod to access KSP telemetry"""

import json
import urllib.error
import urllib.parse
import urllib.request

from basagc import config
from basagc import utils

if config.DEBUG:
    from pudb import set_trace  # lint:ok

telemetry = {}
commands = {}


class TelemetryNotAvailable(Exception):
    """This exception should be raised when we do not have a list of available telemetry"""
    pass


class KSPNotConnected(Exception):
    """ This exception should be raised when there is no connection to KSP """
    pass


def check_connection():
    """
    Checks if there is a connection available to Telemachus
    Returns True if so, False otherwise
    """
    try:
        urllib.request.urlopen(config.URL + "paused=p.paused")
    except urllib.error.URLError:
        return False
    else:
        return True


def get_api_listing():
    """
    Gets the list of API calls provided by Telemachus
    :rtype: dict
    """
    global telemetry
    global commands
    try:
        response = urllib.request.urlopen(config.URL + "api=a.api")
    except urllib.error.URLError:
        raise KSPNotConnected
    response_string = response.read().decode('utf-8')
    data = json.loads(response_string)
    for a in data.values():
        for b in a:
            if b["apistring"].startswith("b."):
                name = "body_" + b["apistring"].rsplit(".", 1)[1]
            elif b["apistring"].startswith("tar."):
                name = "target_" + b["apistring"].rsplit(".", 1)[1]
            elif b["apistring"].startswith("f.") or b["apistring"].startswith("mj.") or \
                    b["apistring"].startswith("v.set"):
                command = b["apistring"].rsplit(".", 1)[1]
                commands[command] = b["apistring"]
                continue
            else:
                name = b["apistring"].rsplit(".", 1)[1]
            telemetry[name] = b["apistring"]


def get_telemetry(data, body_number=None):
    """
    Contacts telemachus for the requested data.
    :param data: The API call required
    :type data: str | float
    :param body_number: Specify which body to obtain data for
    :type body_number: string
    :rtype: string
    """
    # if telemetry is None:
    #     raise TelemetryNotAvailable
    try:
        query_string = data + "=" + telemetry[data]
    except KeyError:
        raise KSPNotConnected
    if body_number:
        query_string += "[{}]".format(body_number)
    try:
        raw_response = urllib.request.urlopen(config.URL + query_string)
    except urllib.error.URLError:
        utils.log("Query string: {}".format(query_string), log_level="ERROR")
        utils.log("Caught exception urllib2.URLERROR", log_level="ERROR")
        raise KSPNotConnected
    response_string = raw_response.read().decode("utf-8")
    json_response = json.loads(response_string)
    return json_response[data]


# def enable_smartass():
#     query_string = "command="


def set_mechjeb_smartass(direction):
    command_string = "command=" + commands[direction]
    send_command_to_ksp(command_string)


def disable_smartass():
    command_string = "command=" + commands["smartassoff"]
    send_command_to_ksp(command_string)


def set_throttle(throttle_percent):
    if throttle_percent == 0:
        throttle_magnitude = 0
    else:
        throttle_magnitude = throttle_percent / 100.0
    command_string = "command=" + commands["setThrottle"] + "[" + str(throttle_magnitude) + "]"
    send_command_to_ksp(command_string)


def cut_throttle():
    command_string = "command=" + commands["throttleZero"]
    send_command_to_ksp(command_string)


def send_command_to_ksp(command_string):
    try:
        urllib.request.urlopen(config.URL + command_string)
    except urllib.error.URLError:
        utils.log("Query string: {}".format(command_string), log_level="ERROR")
        utils.log("Caught exception urllib2.URLERROR", log_level="ERROR")
        raise KSPNotConnected


def print_all_telemetry():
    print("Telemetry available:")
    for item in sorted(telemetry):
        print("- " + item)
    print()
    print("Commands available:")
    for item in sorted(commands):
        print("- " + item)


def add_maneuver_node(ut, delta_v):
    ut = str(round(ut, 2))
    delta_v_x = str(round(delta_v[0], 2))
    delta_v_y = str(round(delta_v[1], 2))
    delta_v_z = str(round(delta_v[2], 2))
    command_string = "command=" + telemetry["addManeuverNode"] + "[" + str(ut) + "," + delta_v_x + "," + delta_v_y + "," + delta_v_z + "]"
    send_command_to_ksp(command_string)


def update_maneuver_node(ut, delta_v):
    ut = str(round(ut, 2))
    delta_v_x = str(round(delta_v[0], 2))
    delta_v_y = str(round(delta_v[1], 2))
    delta_v_z = str(round(delta_v[2], 2))
    command_string = "command=" + telemetry["updateManeuverNode"] + "[0," + str(ut) + "," + delta_v_x + "," + delta_v_y + "," + delta_v_z + "]"
    send_command_to_ksp(command_string)
James Beard didn’t live in Greenwich Village for nothing. Born in Portland, Oregon, in 1903, his earliest culinary memories included having his mother and the family’s Chinese cook make food for him when he came down with malaria at the age of three. After a stint of living in Paris, he returned stateside to attend Reed College back in Portland, where he was kicked out for engaging in homosexual activity (those who know Reed College today may let out a guffaw). Nevertheless, his gayness remained an open secret, unknown to most of his millions of admirers, until he came out in his autobiography late in life. Looking over these incredibly clever quotes, which seem to have been written yesterday, we wonder if he wasn’t sent back to the 1950s in a sort of foodie time machine to pave the way for us today. Of him, the also-queer New York Times food editor and critic Craig Claiborne noted, “Physically he was the connoisseur’s connoisseur. He was a giant panda, Santa Claus and the Jolly Green Giant rolled into one. On him, a lean and slender physique would have looked like very bad casting.” Beard passed away in 1985. Here, then, are our 10 favorite James Beard quotes. Visit the James Beard Foundation.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Utilities for handling ISO 8601 duration format.
"""

import random
import re
import time

from heat.common.i18n import _

iso_duration_re = re.compile('PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$')

wallclock = time.time


class Duration(object):
    '''
    Note that we don't attempt to handle leap seconds or large clock
    jumps here. The latter are assumed to be rare and the former
    negligible in the context of the timeout. Time zone adjustments,
    Daylight Savings and the like *are* handled. PEP 418 adds a proper
    monotonic clock, but only in Python 3.3.
    '''

    def __init__(self, timeout=0):
        self._endtime = wallclock() + timeout

    def expired(self):
        return wallclock() > self._endtime

    def endtime(self):
        return self._endtime


def parse_isoduration(duration):
    """
    Convert duration in ISO 8601 format to second(s).

    Year, Month, Week, and Day designators are not supported.
    Example: 'PT12H30M5S'
    """
    result = iso_duration_re.match(duration)
    if not result:
        raise ValueError(_('Only ISO 8601 duration format of the form '
                           'PT#H#M#S is supported.'))

    t = 0
    t += (3600 * int(result.group(1))) if result.group(1) else 0
    t += (60 * int(result.group(2))) if result.group(2) else 0
    t += int(result.group(3)) if result.group(3) else 0

    return t


def retry_backoff_delay(attempt, scale_factor=1.0, jitter_max=0.0):
    """
    Calculate an exponential backoff delay with jitter.

    Delay is calculated as 2^attempt + (uniform random from [0,1) * jitter_max)

    :param attempt: The count of the current retry attempt
    :param scale_factor: Multiplier to scale the exponential delay by
    :param jitter_max: Maximum of random seconds to add to the delay
    :returns: Seconds since epoch to wait until
    """
    exp = float(2 ** attempt) * float(scale_factor)
    if jitter_max == 0.0:
        return exp
    return exp + random.random() * jitter_max
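A minimal usage sketch of the helpers above; the concrete values and prints are illustrative and not part of Heat itself:

# Illustrative use of the utilities above; values are arbitrary.
timeout = parse_isoduration('PT12H30M5S')    # 12*3600 + 30*60 + 5 = 45005 seconds
d = Duration(timeout)
assert not d.expired()

# Exponential backoff for the first few retry attempts, scaled down
# and with up to 2 seconds of random jitter added to each delay.
for attempt in range(3):
    delay = retry_backoff_delay(attempt, scale_factor=0.5, jitter_max=2.0)
    print('attempt %d: sleep for %.2f seconds' % (attempt, delay))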
I'd like to wish a happy & safe Memorial Day weekend to all of my US friends! Hope it's filled with beautiful weather, BBQs, and lots of R&R. In celebration of this holiday weekend, I have a new patriotic-themed kit that will fill your heart with star-spangled happiness! Free Bird is sure to release the American pride and creativity inside you! You can grab the kit & paper pack separately or at a discount when you buy them together. Here are just a couple of sample layouts to get your creative juices flowing. Be sure to check my Facebook page for more samples over the weekend!

Retirement Sale: In case you missed the news in the Sweet Shoppe newsletter, we are saying goodbye to two of our long-time designers. Darcy Baldwin & Julie Billingsley have made the decision to retire from digital designing to pursue opportunities in other fields that will take their careers in new and exciting directions! I've loved getting to work with these ladies and wish them both nothing but the best. Both Julie's & Darcy's stores are 50% off through next Saturday (May 31). That includes 2 of my favorite collabs I've ever done! You'll want to grab them this week because they're retiring for good once Julie's store closes.

Free With Purchase: We also have a new Free With Purchase offer for you this weekend! Studio Flergs created this gorgeous kit as part of her Featured Designer spotlight. It's yours with your $10 purchase in the shoppe!
# get the total number of hours played in each genre
SELECT genre.name, SUM(hours) AS soma
FROM rel_player_software
INNER JOIN rel_software_genre ON rel_player_software.fk_software = rel_software_genre.fk_software
INNER JOIN genre ON rel_software_genre.fk_genre = genre.id_genre
GROUP BY rel_software_genre.fk_genre
ORDER BY soma DESC
LIMIT 5

# test
SELECT genre.name AS genero, SUM(rel_player_software.hours) AS horas
FROM rel_player_software
INNER JOIN player ON rel_player_software.fk_player = player.id_player
INNER JOIN software ON rel_player_software.fk_software = software.id_software
INNER JOIN rel_software_genre ON rel_software_genre.fk_software = software.id_software
INNER JOIN genre ON rel_software_genre.fk_genre = genre.id_genre
LIMIT 1

# test
SELECT player.name AS jogador, genre.name AS genre, SUM(rel_player_software.hours) AS horas
FROM rel_player_software
INNER JOIN player ON rel_player_software.fk_player = player.id_player
INNER JOIN software ON rel_player_software.fk_software = software.id_software
INNER JOIN rel_software_genre ON rel_software_genre.fk_software = software.id_software
INNER JOIN genre ON rel_software_genre.fk_genre = genre.id_genre
GROUP BY jogador
ORDER BY horas DESC
LIMIT 3

# test
(SELECT player.username AS jogador, genre.name AS genero, SUM(rel_player_software.hours) AS horas
 FROM rel_player_software
 INNER JOIN player ON rel_player_software.fk_player = player.id_player
 INNER JOIN rel_software_genre ON rel_software_genre.fk_software = rel_player_software.fk_software
 INNER JOIN genre ON rel_software_genre.fk_genre = genre.id_genre
 WHERE player.id_player <= 3
 GROUP BY jogador, genero
 ORDER BY jogador, horas DESC)

# get each player's most played genre
SELECT jogador, genero, MAX(horas)
FROM (
    SELECT player.username AS jogador, genre.name AS genero, SUM(rel_player_software.hours) AS horas
    FROM (rel_player_software
          INNER JOIN player ON rel_player_software.fk_player = player.id_player
          INNER JOIN rel_software_genre ON rel_software_genre.fk_software = rel_player_software.fk_software
          INNER JOIN genre ON rel_software_genre.fk_genre = genre.id_genre)
    GROUP BY jogador, genero
    ORDER BY jogador, horas DESC
) p
GROUP BY jogador
Tassaght.org is 1 month old (current registration since 09 March 2019). tassaght.org has been known to our system for 1 month. Its IP address is 198.71.232.3, and there are at least 100 websites associated with this IP, so it's a shared host.
#####
#
# Module name:  dupApprise.py
# Purpose:      Management class for Apprise notification service
#
# Notes:        Uses the Apprise push notification utility from @caronc
#               https://github.com/caronc/apprise
#               For any Apprise support or feature requests, please see the Apprise GitHub site
#
#####

# Import system modules
import db
import drdatetime

# Import dupReport modules
import globs


class dupApprise:

    appriseConn = None
    appriseOpts = None
    services = None

    def __init__(self):
        globs.log.write(globs.SEV_NOTICE, function='Apprise', action='Init', msg='Initializing Apprise support')

        import apprise

        # Read name/value pairs from [apprise] section
        self.appriseOpts = globs.optionManager.getRcSection('apprise')

        if 'services' not in self.appriseOpts:
            globs.log.write(globs.SEV_ERROR, function='Apprise', action='Init', msg='Error: No services defined for Apprise notification')
            globs.closeEverythingAndExit(1)  # Abort program. Can't continue

        # Set defaults for missing values
        self.appriseOpts['title'] = 'Apprise Notification for #SRCDEST# Backup' if 'title' not in self.appriseOpts else self.appriseOpts['title']
        self.appriseOpts['body'] = 'Completed at #COMPLETETIME#: #RESULT# - #ERRMSG#' if 'body' not in self.appriseOpts else self.appriseOpts['body']
        self.appriseOpts['titletruncate'] = '0' if 'titletruncate' not in self.appriseOpts else self.appriseOpts['titletruncate']
        self.appriseOpts['bodytruncate'] = '0' if 'bodytruncate' not in self.appriseOpts else self.appriseOpts['bodytruncate']
        self.appriseOpts['msglevel'] = 'failure' if 'msglevel' not in self.appriseOpts else self.appriseOpts['msglevel']

        # Normalize .rc values
        self.appriseOpts['titletruncate'] = int(self.appriseOpts['titletruncate'])
        self.appriseOpts['bodytruncate'] = int(self.appriseOpts['bodytruncate'])
        self.appriseOpts['msglevel'] = self.appriseOpts['msglevel'].lower()

        # Check for correct message level indicator
        if self.appriseOpts['msglevel'] not in ('success', 'warning', 'failure'):
            globs.log.write(globs.SEV_ERROR, function='Apprise', action='Init', msg='Error: Bad apprise message level: {}'.format(self.appriseOpts['msglevel']))
            globs.closeEverythingAndExit(1)  # Abort program. Can't continue.

        # Initialize apprise library
        result = self.appriseConn = apprise.Apprise()
        globs.log.write(globs.SEV_NOTICE, function='Apprise', action='Init', msg='Initializing Apprise library. Result={}'.format(result))

        # Add individual service URLs to connection
        self.services = self.appriseOpts['services'].split(",")
        for i in self.services:
            result = self.appriseConn.add(i)
            globs.log.write(globs.SEV_NOTICE, function='Apprise', action='Init', msg='Added service {}, result={}'.format(i, result))

        globs.log.write(globs.SEV_NOTICE, function='Apprise', action='Init', msg='Apprise Initialization complete.')
        return None

    def parseMessage(self, msg, source, destination, result, message, warningmessage, errormessage, completetime):
        globs.log.write(globs.SEV_NOTICE, function='Apprise', action='parseMessage', msg=msg)

        newMsg = msg
        newMsg = newMsg.replace('#SOURCE#', source)
        newMsg = newMsg.replace('#DESTINATION#', destination)
        newMsg = newMsg.replace('#SRCDEST#', '{}{}{}'.format(source, globs.opts['srcdestdelimiter'], destination))
        newMsg = newMsg.replace('#RESULT#', result)
        newMsg = newMsg.replace('#MESSAGE#', message)
        newMsg = newMsg.replace('#ERRMSG#', errormessage)
        newMsg = newMsg.replace('#WARNMSG#', warningmessage)
        newMsg = newMsg.replace('#COMPLETETIME#', '{} {}'.format(completetime[0], completetime[1]))

        globs.log.write(globs.SEV_NOTICE, function='Apprise', action='parseMessage', msg='New message=[{}]'.format(newMsg))
        return newMsg

    def sendNotifications(self):
        sqlStmt = "SELECT source, destination, parsedResult, messages, warnings, errors, timestamp FROM report ORDER BY source"
        dbCursor = globs.db.execSqlStmt(sqlStmt)
        reportRows = dbCursor.fetchall()

        for source, destination, parsedResult, messages, warnings, errors, timestamp in reportRows:
            globs.log.write(globs.SEV_NOTICE, function='Apprise', action='sendNotifications', msg='Preparing Apprise message for {}-{}, parsedResult={} msglevel={}'.format(source, destination, parsedResult, self.appriseOpts['msglevel']))

            # See if we need to send a notification based on the result status
            if self.appriseOpts['msglevel'] == 'warning':
                if parsedResult.lower() not in ('warning', 'failure'):
                    globs.log.write(globs.SEV_NOTICE, function='Apprise', action='sendNotifications', msg='Msglevel mismatch at warning level - skipping')
                    continue
            elif self.appriseOpts['msglevel'] == 'failure':
                if parsedResult.lower() != 'failure':
                    globs.log.write(globs.SEV_NOTICE, function='Apprise', action='sendNotifications', msg='Msglevel mismatch at failure level - skipping')
                    continue

            globs.log.write(globs.SEV_DEBUG, function='Apprise', action='sendNotifications', msg='Apprise message is sendable.')

            newTitle = self.parseMessage(self.appriseOpts['title'], source, destination, parsedResult, messages, warnings, errors, drdatetime.fromTimestamp(timestamp))
            newBody = self.parseMessage(self.appriseOpts['body'], source, destination, parsedResult, messages, warnings, errors, drdatetime.fromTimestamp(timestamp))

            tLen = self.appriseOpts['titletruncate']
            if tLen != 0:
                newTitle = (newTitle[:tLen]) if len(newTitle) > tLen else newTitle
            bLen = self.appriseOpts['bodytruncate']
            if bLen != 0:
                newBody = (newBody[:bLen]) if len(newBody) > bLen else newBody

            globs.log.write(globs.SEV_DEBUG, function='Apprise', action='sendNotifications', msg='Sending notification: Title=[{}] Body=[{}]'.format(newTitle, newBody))
            result = self.appriseConn.notify(title=newTitle, body=newBody)
            globs.log.write(globs.SEV_NOTICE, function='Apprise', action='sendNotifications', msg='Apprise sent. Result={}.'.format(result))
        return
The Residential and Campus Life office is based in the Edward Llwyd Building at our Wrexham campus. We are a friendly and helpful team who aim to make your stay here at Wrexham Glyndwr University as enjoyable as possible. We aim to support all residents in our accommodation to enjoy their stay, and we're so proud to have received the 2015-16 Best Support Staff Award, as voted for by the students. If there is anything we can help you with, don't hesitate to get in touch. You can email us at [email protected] or call 01978 293344.
# Produce three txt files containing timestamps: min, max and union
import psycopg2

# Connect to my PostgreSQL database
connection = psycopg2.connect(database="postgres", user="postgres", host="127.0.0.1",
                              port='5432', password="kwy17502X")
print(1)

# Get a cursor
cursor = connection.cursor()
print(2)

try:
    cursor.execute("drop table min")
    cursor.execute("drop table max")
    # cursor.execute("drop table uniontime")
    cursor.execute("CREATE TABLE min (id serial PRIMARY KEY, userid text , min text);")
    print(3)
    cursor.execute("CREATE TABLE max (id serial PRIMARY KEY, userid text , max text);")
    print(4)

    # Import max.txt into the database
    filemax = open('E:/max.txt')
    linesmax = filemax.readlines()
    filemax.close()
    numbermax = 1
    for linemax in linesmax:
        if (numbermax % 2):
            linemax = linemax.split(',')
            linemax[1] = linemax[1].replace('\"', '')
            print(linemax[0])
            # change the table name here if needed
            cursor.execute("INSERT INTO max (userid,max) VALUES (%s,%s)", (linemax[0], linemax[1]))
        numbermax = numbermax + 1

    # Import min.txt into the database
    filemin = open('E:/min.txt')
    linesmin = filemin.readlines()
    filemin.close()
    numbermin = 1
    for linemin in linesmin:
        if (numbermin % 2):
            linemin = linemin.split(',')
            linemin[1] = linemin[1].replace('\"', '')
            print(linemin[0])
            # change the table name here if needed
            cursor.execute("INSERT INTO min (userid,min) VALUES (%s,%s)", (linemin[0], linemin[1]))
        numbermin = numbermin + 1

    # Export the joined result to union.txt
    cursor.execute("copy(SELECT max.userid, max.max,min.min FROM max INNER JOIN min ON max.userid=min.userid ) to 'E:/union.txt' with csv;")

    # Store the result in the database -- unfinished
    # cursor.execute("CREATE TABLE uniontime (id serial PRIMARY KEY , userid text , maxtime text , mintime text);")
    # numberunion = 0
    # fileunion = open('E:/union.txt')
    # linesunion = fileunion.readlines()
    # fileunion.close()
    # numberunion = 1
    # for lineunion in linesunion:
    #     lineunion = lineunion.replace('\"', '')
    #     lineunion = lineunion.replace('\n', '')
    #     lineunion = lineunion.split(',')
    #     print(lineunion)
    #     print(133)
    #     if ((numberunion % 3) == 1):
    #         # change the table name here if needed
    #         numberunion = numberunion + 1
    #         cursor.execute("INSERT INTO uniontime (userid,maxtime) VALUES (%s,%s,%s)", (lineunion[0], str(lineunion[1])))
    #     if ((numberunion % 3) == 2):
    #         # change the table name here if needed
    #         numberunion = numberunion + 1
    #         cursor.execute("INSERT INTO uniontime (mintime) VALUES (%s)", (str(lineunion[1]),))
    #     if ((numberunion % 3) == 0):
    #         numberunion = numberunion + 1
except Exception as e:
    print(repr(e))

connection.commit()
connection.close()
My trip to Oktoberfest 2007 was amazing. From the beginning, the staff was very helpful. They did a wonderful job of keeping us updated and informed. My friends and I have already started discussing our next trip to Munich!
# Copyright 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import re import time from zuul import exceptions from zuul.model import Change, Ref from zuul.source import BaseSource # Walk the change dependency tree to find a cycle def detect_cycle(change, history=None): if history is None: history = [] else: history = history[:] history.append(change.number) for dep in change.needs_changes: if dep.number in history: raise Exception("Dependency cycle detected: %s in %s" % ( dep.number, history)) detect_cycle(dep, history) class GerritSource(BaseSource): name = 'gerrit' log = logging.getLogger("zuul.source.Gerrit") replication_timeout = 300 replication_retry_interval = 5 depends_on_re = re.compile(r"^Depends-On: (I[0-9a-f]{40})\s*$", re.MULTILINE | re.IGNORECASE) def getRefSha(self, project, ref): refs = {} try: refs = self.connection.getInfoRefs(project) except: self.log.exception("Exception looking for ref %s" % ref) sha = refs.get(ref, '') return sha def _waitForRefSha(self, project, ref, old_sha=''): # Wait for the ref to show up in the repo start = time.time() while time.time() - start < self.replication_timeout: sha = self.getRefSha(project.name, ref) if old_sha != sha: return True time.sleep(self.replication_retry_interval) return False def isMerged(self, change, head=None): self.log.debug("Checking if change %s is merged" % change) if not change.number: self.log.debug("Change has no number; considering it merged") # Good question. It's probably ref-updated, which, ah, # means it's merged. return True data = self.connection.query(change.number) change._data = data change.is_merged = self._isMerged(change) if change.is_merged: self.log.debug("Change %s is merged" % (change,)) else: self.log.debug("Change %s is not merged" % (change,)) if not head: return change.is_merged if not change.is_merged: return False ref = 'refs/heads/' + change.branch self.log.debug("Waiting for %s to appear in git repo" % (change)) if self._waitForRefSha(change.project, ref, change._ref_sha): self.log.debug("Change %s is in the git repo" % (change)) return True self.log.debug("Change %s did not appear in the git repo" % (change)) return False def _isMerged(self, change): data = change._data if not data: return False status = data.get('status') if not status: return False if status == 'MERGED': return True return False def canMerge(self, change, allow_needs): if not change.number: self.log.debug("Change has no number; considering it merged") # Good question. It's probably ref-updated, which, ah, # means it's merged. 
return True data = change._data if not data: return False if 'submitRecords' not in data: return False try: for sr in data['submitRecords']: if sr['status'] == 'OK': return True elif sr['status'] == 'NOT_READY': for label in sr['labels']: if label['status'] in ['OK', 'MAY']: continue elif label['status'] in ['NEED', 'REJECT']: # It may be our own rejection, so we ignore if label['label'].lower() not in allow_needs: return False continue else: # IMPOSSIBLE return False else: # CLOSED, RULE_ERROR return False except: self.log.exception("Exception determining whether change" "%s can merge:" % change) return False return True def postConfig(self): pass def getChange(self, event, project): if event.change_number: refresh = False change = self._getChange(event.change_number, event.patch_number, refresh=refresh) else: change = Ref(project) change.connection_name = self.connection.connection_name change.ref = event.ref change.oldrev = event.oldrev change.newrev = event.newrev change.url = self._getGitwebUrl(project, sha=event.newrev) return change def _getChange(self, number, patchset, refresh=False, history=None): key = '%s,%s' % (number, patchset) change = self.connection.getCachedChange(key) if change and not refresh: return change if not change: change = Change(None) change.connection_name = self.connection.connection_name change.number = number change.patchset = patchset key = '%s,%s' % (change.number, change.patchset) self.connection.updateChangeCache(key, change) try: self._updateChange(change, history) except Exception: self.connection.deleteCachedChange(key) raise return change def getProjectOpenChanges(self, project): # This is a best-effort function in case Gerrit is unable to return # a particular change. It happens. query = "project:%s status:open" % (project.name,) self.log.debug("Running query %s to get project open changes" % (query,)) data = self.connection.simpleQuery(query) changes = [] for record in data: try: changes.append( self._getChange(record['number'], record['currentPatchSet']['number'])) except Exception: self.log.exception("Unable to query change %s" % (record.get('number'),)) return changes def _getDependsOnFromCommit(self, message, change): records = [] seen = set() for match in self.depends_on_re.findall(message): if match in seen: self.log.debug("Ignoring duplicate Depends-On: %s" % (match,)) continue seen.add(match) query = "change:%s" % (match,) self.log.debug("Updating %s: Running query %s " "to find needed changes" % (change, query,)) records.extend(self.connection.simpleQuery(query)) return records def _getNeededByFromCommit(self, change_id, change): records = [] seen = set() query = 'message:%s' % change_id self.log.debug("Updating %s: Running query %s " "to find changes needed-by" % (change, query,)) results = self.connection.simpleQuery(query) for result in results: for match in self.depends_on_re.findall( result['commitMessage']): if match != change_id: continue key = (result['number'], result['currentPatchSet']['number']) if key in seen: continue self.log.debug("Updating %s: Found change %s,%s " "needs %s from commit" % (change, key[0], key[1], change_id)) seen.add(key) records.append(result) return records def _updateChange(self, change, history=None): self.log.info("Updating %s" % (change,)) data = self.connection.query(change.number) change._data = data if change.patchset is None: change.patchset = data['currentPatchSet']['number'] if 'project' not in data: raise exceptions.ChangeNotFound(change.number, change.patchset) change.project = 
self.sched.getProject(data['project']) change.branch = data['branch'] change.url = data['url'] max_ps = 0 files = [] for ps in data['patchSets']: if ps['number'] == change.patchset: change.refspec = ps['ref'] for f in ps.get('files', []): files.append(f['file']) if int(ps['number']) > int(max_ps): max_ps = ps['number'] if max_ps == change.patchset: change.is_current_patchset = True else: change.is_current_patchset = False change.files = files change.is_merged = self._isMerged(change) change.approvals = data['currentPatchSet'].get('approvals', []) change.open = data['open'] change.status = data['status'] change.owner = data['owner'] if change.is_merged: # This change is merged, so we don't need to look any further # for dependencies. self.log.debug("Updating %s: change is merged" % (change,)) return change if history is None: history = [] else: history = history[:] history.append(change.number) needs_changes = [] if 'dependsOn' in data: parts = data['dependsOn'][0]['ref'].split('/') dep_num, dep_ps = parts[3], parts[4] if dep_num in history: raise Exception("Dependency cycle detected: %s in %s" % ( dep_num, history)) self.log.debug("Updating %s: Getting git-dependent change %s,%s" % (change, dep_num, dep_ps)) dep = self._getChange(dep_num, dep_ps, history=history) # Because we are not forcing a refresh in _getChange, it # may return without executing this code, so if we are # updating our change to add ourselves to a dependency # cycle, we won't detect it. By explicitly performing a # walk of the dependency tree, we will. detect_cycle(dep, history) if (not dep.is_merged) and dep not in needs_changes: needs_changes.append(dep) for record in self._getDependsOnFromCommit(data['commitMessage'], change): dep_num = record['number'] dep_ps = record['currentPatchSet']['number'] if dep_num in history: raise Exception("Dependency cycle detected: %s in %s" % ( dep_num, history)) self.log.debug("Updating %s: Getting commit-dependent " "change %s,%s" % (change, dep_num, dep_ps)) dep = self._getChange(dep_num, dep_ps, history=history) # Because we are not forcing a refresh in _getChange, it # may return without executing this code, so if we are # updating our change to add ourselves to a dependency # cycle, we won't detect it. By explicitly performing a # walk of the dependency tree, we will. detect_cycle(dep, history) if (not dep.is_merged) and dep not in needs_changes: needs_changes.append(dep) change.needs_changes = needs_changes needed_by_changes = [] if 'neededBy' in data: for needed in data['neededBy']: parts = needed['ref'].split('/') dep_num, dep_ps = parts[3], parts[4] self.log.debug("Updating %s: Getting git-needed change %s,%s" % (change, dep_num, dep_ps)) dep = self._getChange(dep_num, dep_ps) if (not dep.is_merged) and dep.is_current_patchset: needed_by_changes.append(dep) for record in self._getNeededByFromCommit(data['id'], change): dep_num = record['number'] dep_ps = record['currentPatchSet']['number'] self.log.debug("Updating %s: Getting commit-needed change %s,%s" % (change, dep_num, dep_ps)) # Because a commit needed-by may be a cross-repo # dependency, cause that change to refresh so that it will # reference the latest patchset of its Depends-On (this # change). 
dep = self._getChange(dep_num, dep_ps, refresh=True) if (not dep.is_merged) and dep.is_current_patchset: needed_by_changes.append(dep) change.needed_by_changes = needed_by_changes return change def getGitUrl(self, project): return self.connection.getGitUrl(project) def _getGitwebUrl(self, project, sha=None): return self.connection.getGitwebUrl(project, sha)
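The detect_cycle helper defined at the top of this module only needs objects with number and needs_changes attributes, so its behaviour can be sketched with a stand-in class; FakeChange below is hypothetical and not part of Zuul:

# Stand-in objects to illustrate detect_cycle (not part of Zuul itself).
class FakeChange(object):
    def __init__(self, number):
        self.number = number
        self.needs_changes = []

a, b = FakeChange('1001'), FakeChange('1002')
a.needs_changes.append(b)
b.needs_changes.append(a)   # introduce a circular dependency

try:
    detect_cycle(a)
except Exception as exc:
    print(exc)   # Dependency cycle detected: 1001 in ['1001', '1002']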
Across the nation, fishermen and scientists are observing notable shifts in the ocean ecosystem and dramatic changes on the water. This series of four short films examines how the fishing industry is dealing with climate change, including warming waters, lack of biodiversity, and ocean acidification. The fourth film introduces shellfish aquaculture as an example of economic diversification for fishing communities. The films take place in Maine, Alaska, and Florida, and were produced by the Island Institute’s Scott Sell.
# This Python file uses the following encoding: utf-8
from django.db import models
from django.utils.translation import ugettext_lazy as _


class Project(models.Model):

    area = models.CharField(_('área'), max_length=100, blank=False, null=False)
    tema = models.CharField(_('tema'), max_length=200, blank=False, null=False)
    descricao = models.CharField(_('descrição'), max_length=400, blank=False, null=False)
    universidade = models.CharField(_('universidade'), max_length=200, null=False)
    universidadeOrientador = models.CharField(_('orientador'), max_length=200, blank=True, null=True)
    liderNome = models.CharField(_('líder'), max_length=200, null=False, blank=False)
    liderTelefone = models.CharField(_('telefone'), max_length=20, blank=False, null=False)
    liderEmail = models.EmailField(_('email'), max_length=100, null=False, blank=False)
    liderSocial = models.CharField(_('rede social'), max_length=200, blank=True)
    liderIntegrantes = models.CharField(_('integrantes'), max_length=400, blank=True, null=True)
    link_slides = models.CharField(_('slides'), max_length=300, blank=True)
    link_monografia = models.CharField(_('monografia'), max_length=300, blank=True)
    link_modelagem = models.CharField(_('modelagem'), max_length=300, blank=True)
    link_website = models.CharField(_('website'), max_length=300, blank=True)
    link_outros = models.CharField(_('outros'), max_length=300, blank=True)
    link_versionamento = models.CharField(_('versionamento'), max_length=300, blank=True)
    etapa = models.CharField(_('etapa'), max_length=3, blank=True)
    tags = models.CharField(_('tags'), max_length=300, blank=True)
    ativo = models.CharField(_('ativo'), max_length=3, default='VAL')
    dataAlteracao = models.DateTimeField(_('data de alteracao'), auto_now=True, auto_now_add=True)
    dataCadastro = models.DateTimeField(_('data de cadastro'), auto_now=False, auto_now_add=True)

    class Meta:
        ordering = ['dataCadastro']
        verbose_name = _(u'projeto')
        verbose_name_plural = _(u'projetos')

    def __unicode__(self):
        return self.tema + ' - ' + self.liderNome

    def save(self, force_insert=False, force_update=False):
        self.area = self.area.upper()
        self.tema = self.tema.upper()
        self.descricao = self.descricao.upper()
        self.universidade = self.universidade.upper()
        self.universidadeOrientador = self.universidadeOrientador.upper()
        self.liderNome = self.liderNome.upper()
        self.liderEmail = self.liderEmail.upper()
        self.liderIntegrantes = self.liderIntegrantes.upper()
        self.etapa = self.etapa.upper()
        self.tags = self.tags.upper()
        self.ativo = self.ativo.upper()
        super(Project, self).save(force_insert, force_update)
Ms-PL and Ms-RL are identical except for their names and the section Reciprocal Grants inserted as the first section in Ms-RL. ¶1¶1ṣ1This license governs use of the accompanying software. ¶1ṣ2If you use the software, you accept this license. ¶1ṣ3If you do not accept the license, do not use the software. §1¶1§1¶1ṣ1The terms reproduce, reproduction, derivative works, and distribution have the same meaning here as under U.S. copyright law. §1¶2§1¶2ṣ1A contribution is the original software, or any additions or changes to the software. §1¶3§1¶3ṣ1A contributor is any person that distributes its contribution under this license. §1¶4§1¶4ṣ1 Licensed patents are a contributor's patent claims that Next word is incorrect, perhaps bear read directly on its contribution. (A) §2.ACopyright Grant— §2.A¶1§2.A¶1ṣ1Subject to the terms of this license, including the license conditions and limitations in section 3, each contributor grants you a non-exclusive, worldwide, royalty-free copyright license to reproduce its contribution, prepare derivative works of its contribution, and distribute its contribution or any derivative works that you create. (B) §2.BPatent Grant— §2.B¶1§2.B¶1ṣ1Subject to the terms of this license, including the license conditions and limitations in section 3, each contributor grants you a non-exclusive, worldwide, royalty-free license under its licensed patents to make, have made, use, sell, offer for sale, import, and/or otherwise dispose of its contribution in the software or derivative works of the contribution in the software. (A) §3.AReciprocal Grants— §3.A¶1§3.A¶1ṣ1For any file you distribute that contains code from the software (in source code or binary format), you must provide recipients the source code to that file along with a copy of this license, which license will govern that file. §3.A¶1ṣ2You may license other files that are entirely your own work and do not contain code from the software under any terms you choose. (B) §3.BNo Trademark License— §3.B¶1§3.B¶1ṣ1This license does not grant you rights to use any contributors' name, logo, or trademarks. (C) §3.C§3.C¶1ṣ1If you bring a patent claim against any contributor over patents that you claim are infringed by the software, your patent license from such contributor to the software ends automatically. (D) §3.D§3.D¶1ṣ1If you distribute any portion of the software, you must retain all copyright, patent, trademark, and attribution notices that are present in the software. (E) §3.E§3.E¶1ṣ1If you distribute any portion of the software in source code form, you may do so only under this license by including a complete copy of this license with your distribution. §3.E¶1ṣ2If you distribute any portion of the software in compiled or object code form, you may only do so under a license that complies with this license. (F) §3.F§3.F¶1ṣ1The software is licensed as-is. §3.F¶1ṣ2You bear the risk of using it. §3.F¶1ṣ3The contributors give no express warranties, guarantees or conditions. §3.F¶1ṣ4You may have additional consumer rights under your local laws which this license cannot change. §3.F¶1ṣ5To the extent permitted under your local laws, the contributors exclude the implied warranties of merchantability, fitness for a particular purpose and non-infringement.
# -*- coding:utf-8 -*-

from __future__ import unicode_literals

import re

from django.db import models
from django.utils import six
from django.utils.translation import ugettext_lazy as _

from yepes import forms
from yepes.fields.char import CharField
from yepes.utils.deconstruct import clean_keywords


class CommaSeparatedField(CharField):

    description = _('Comma-separated strings')

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('max_length', 255)
        kwargs['normalize_spaces'] = False
        self.separator = kwargs.pop('separator', ', ')
        self.separator_re = re.compile(
            '\s*{0}\s*'.format(re.escape(self.separator.strip())),
            re.UNICODE,
        )
        kwargs['trim_spaces'] = False
        super(CommaSeparatedField, self).__init__(*args, **kwargs)

    def clean(self, value, model_instance):
        value = self.get_prep_value(value)
        self.validate(value, model_instance)
        self.run_validators(value)
        return self.to_python(value)

    def deconstruct(self):
        name, path, args, kwargs = super(CommaSeparatedField, self).deconstruct()
        path = path.replace('yepes.fields.comma_separated', 'yepes.fields')
        clean_keywords(self, kwargs, variables={
            'max_length': 255,
            'separator': ', ',
        }, constants=[
            'normalize_spaces',
            'trim_spaces',
        ])
        return name, path, args, kwargs

    def formfield(self, **kwargs):
        kwargs.setdefault('form_class', forms.CommaSeparatedField)
        kwargs.setdefault('separator', self.separator)
        return super(CommaSeparatedField, self).formfield(**kwargs)

    def from_db_value(self, value, expression, connection, context):
        if value is None:
            return value
        else:
            return self.separator_re.split(value)

    def get_prep_value(self, value):
        value = models.Field.get_prep_value(self, value)
        if value is None or isinstance(value, six.string_types):
            return value
        else:
            return self.separator.join(value)

    def to_python(self, value):
        if value is None:
            return value
        elif not value:
            return []
        elif isinstance(value, six.string_types):
            return self.separator_re.split(value)
        else:
            return list(value)

    def value_to_string(self, obj):
        value = self._get_val_from_obj(obj)
        return self.get_prep_value(value)
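A hedged sketch of how the field might be declared on a model; the Article model and its fields are made-up names for illustration, not part of the yepes code above:

# Hypothetical model using CommaSeparatedField (names are made up).
from django.db import models
from yepes.fields import CommaSeparatedField

class Article(models.Model):
    title = models.CharField(max_length=100)
    # Stored in the database as "django, python, orm";
    # read back from the database as ['django', 'python', 'orm'].
    tags = CommaSeparatedField(max_length=255, separator=', ', blank=True)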
Regulations Guidance & Training Aids – Pipeline Identification sign. Regulation guidance posters serve as an invaluable staff training aid by raising staff awareness and acting as a constant reminder. Current legislation updates will be reflected in these products and are subject to change. Available in aluminium and rigid plastic to suit your needs. Not exactly what you are looking for?
"""Computations based on Chebyshev polynomial expansion The kernel polynomial method (KPM) can be used to approximate various functions by expanding them in a series of Chebyshev polynomials. """ import warnings import numpy as np import scipy from . import _cpp from . import results from .model import Model from .system import System from .utils.time import timed from .support.deprecated import LoudDeprecationWarning __all__ = ['KernelPolynomialMethod', 'kpm', 'kpm_cuda', 'jackson_kernel', 'lorentz_kernel', 'dirichlet_kernel'] class SpatialLDOS: """Holds the results of :meth:`KPM.calc_spatial_ldos` It's a product of a :class:`Series` and a :class:`StructureMap`. """ def __init__(self, data, energy, structure): self.data = data self.energy = energy self.structure = structure def structure_map(self, energy): """Return a :class:`StructureMap` of the spatial LDOS at the given energy Parameters ---------- energy : float Produce a structure map for LDOS data closest to this energy value. """ idx = np.argmin(abs(self.energy - energy)) return self.structure.with_data(self.data[idx]) def ldos(self, position, sublattice=""): """Return the LDOS as a function of energy at a specific position Parameters ---------- position : array_like sublattice : Optional[str] """ idx = self.structure.find_nearest(position, sublattice) return results.Series(self.energy, self.data[:, idx], labels=dict(variable="E (eV)", data="LDOS", columns="orbitals")) class KernelPolynomialMethod: """The common interface for various KPM implementations It should not be created directly but via specific functions like :func:`kpm` or :func:`kpm_cuda`. All implementations are based on: https://doi.org/10.1103/RevModPhys.78.275 """ def __init__(self, impl): self.impl = impl @property def model(self) -> Model: """The tight-binding model holding the Hamiltonian""" return self.impl.model @model.setter def model(self, model): self.impl.model = model @property def system(self) -> System: """The tight-binding system (shortcut for `KernelPolynomialMethod.model.system`)""" return System(self.impl.system) @property def scaling_factors(self) -> tuple: """A tuple of KPM scaling factors `a` and `b`""" return self.impl.scaling_factors @property def kernel(self): """The damping kernel""" return self.impl.kernel def report(self, shortform=False): """Return a report of the last computation Parameters ---------- shortform : bool, optional Return a short one line version of the report """ return self.impl.report(shortform) def __call__(self, *args, **kwargs): warnings.warn("Use .calc_greens() instead", LoudDeprecationWarning) return self.calc_greens(*args, **kwargs) def moments(self, num_moments, alpha, beta=None, op=None): r"""Calculate KPM moments in the form of expectation values The result is an array of moments where each value is equal to: .. math:: \mu_n = <\beta|op \cdot T_n(H)|\alpha> Parameters ---------- num_moments : int The number of moments to calculate. alpha : array_like The starting state vector of the KPM iteration. beta : Optional[array_like] If not given, defaults to :math:`\beta = \alpha`. op : Optional[csr_matrix] Operator in the form of a sparse matrix. If omitted, an identity matrix is assumed: :math:`\mu_n = <\beta|T_n(H)|\alpha>`. 
Returns ------- ndarray """ from scipy.sparse import csr_matrix if beta is None: beta = [] if op is None: op = csr_matrix([]) else: op = op.tocsr() return self.impl.moments(num_moments, alpha, beta, op) def calc_greens(self, i, j, energy, broadening): """Calculate Green's function of a single Hamiltonian element Parameters ---------- i, j : int Hamiltonian indices. energy : ndarray Energy value array. broadening : float Width, in energy, of the smallest detail which can be resolved. Lower values result in longer calculation time. Returns ------- ndarray Array of the same size as the input `energy`. """ return self.impl.calc_greens(i, j, energy, broadening) def calc_ldos(self, energy, broadening, position, sublattice="", reduce=True): """Calculate the local density of states as a function of energy Parameters ---------- energy : ndarray Values for which the LDOS is calculated. broadening : float Width, in energy, of the smallest detail which can be resolved. Lower values result in longer calculation time. position : array_like Cartesian position of the lattice site for which the LDOS is calculated. Doesn't need to be exact: the method will find the actual site which is closest to the given position. sublattice : str Only look for sites of a specific sublattice, closest to `position`. The default value considers any sublattice. reduce : bool This option is only relevant for multi-orbital models. If true, the resulting LDOS will summed over all the orbitals at the target site and the result will be a 1D array. If false, the individual orbital results will be preserved and the result will be a 2D array with `shape == (energy.size, num_orbitals)`. Returns ------- :class:`~pybinding.Series` """ ldos = self.impl.calc_ldos(energy, broadening, position, sublattice, reduce) return results.Series(energy, ldos.squeeze(), labels=dict(variable="E (eV)", data="LDOS", columns="orbitals")) def calc_spatial_ldos(self, energy, broadening, shape, sublattice=""): """Calculate the LDOS as a function of energy and space (in the area of the given shape) Parameters ---------- energy : ndarray Values for which the LDOS is calculated. broadening : float Width, in energy, of the smallest detail which can be resolved. Lower values result in longer calculation time. shape : Shape Determines the site positions at which to do the calculation. sublattice : str Only look for sites of a specific sublattice, within the `shape`. The default value considers any sublattice. Returns ------- :class:`SpatialLDOS` """ ldos = self.impl.calc_spatial_ldos(energy, broadening, shape, sublattice) smap = self.system[shape.contains(*self.system.positions)] if sublattice: smap = smap[smap.sub == sublattice] return SpatialLDOS(ldos, energy, smap) def calc_dos(self, energy, broadening, num_random=1): """Calculate the density of states as a function of energy Parameters ---------- energy : ndarray Values for which the DOS is calculated. broadening : float Width, in energy, of the smallest detail which can be resolved. Lower values result in longer calculation time. num_random : int The number of random vectors to use for the stochastic calculation of KPM moments. Larger numbers improve the quality of the result but also increase calculation time linearly. Fortunately, result quality also improves with system size, so the DOS of very large systems can be calculated accurately with only a small number of random vectors. 
Returns ------- :class:`~pybinding.Series` """ dos = self.impl.calc_dos(energy, broadening, num_random) return results.Series(energy, dos, labels=dict(variable="E (eV)", data="DOS")) def deferred_ldos(self, energy, broadening, position, sublattice=""): """Same as :meth:`calc_ldos` but for parallel computation: see the :mod:`.parallel` module Parameters ---------- energy : ndarray Values for which the LDOS is calculated. broadening : float Width, in energy, of the smallest detail which can be resolved. Lower values result in longer calculation time. position : array_like Cartesian position of the lattice site for which the LDOS is calculated. Doesn't need to be exact: the method will find the actual site which is closest to the given position. sublattice : str Only look for sites of a specific sublattice, closest to `position`. The default value considers any sublattice. Returns ------- Deferred """ return self.impl.deferred_ldos(energy, broadening, position, sublattice) def calc_conductivity(self, chemical_potential, broadening, temperature, direction="xx", volume=1.0, num_random=1, num_points=1000): """Calculate Kubo-Bastin electrical conductivity as a function of chemical potential The return value is in units of the conductance quantum (e^2 / hbar) not taking into account spin or any other degeneracy. The calculation is based on: https://doi.org/10.1103/PhysRevLett.114.116602. Parameters ---------- chemical_potential : array_like Values (in eV) for which the conductivity is calculated. broadening : float Width (in eV) of the smallest detail which can be resolved in the chemical potential. Lower values result in longer calculation time. temperature : float Value of temperature for the Fermi-Dirac distribution. direction : Optional[str] Direction in which the conductivity is calculated. E.g., "xx", "xy", "zz", etc. volume : Optional[float] The volume of the system. num_random : int The number of random vectors to use for the stochastic calculation of KPM moments. Larger numbers improve the quality of the result but also increase calculation time linearly. Fortunately, result quality also improves with system size, so the DOS of very large systems can be calculated accurately with only a small number of random vectors. num_points : Optional[int] Number of points for integration. Returns ------- :class:`~pybinding.Series` """ data = self.impl.calc_conductivity(chemical_potential, broadening, temperature, direction, num_random, num_points) if volume != 1.0: data /= volume return results.Series(chemical_potential, data, labels=dict(variable=r"$\mu$ (eV)", data="$\sigma (e^2/h)$")) class _ComputeProgressReporter: def __init__(self): from .utils.progressbar import ProgressBar self.pbar = ProgressBar(0) def __call__(self, delta, total): if total == 1: return # Skip reporting for short jobs if delta < 0: print("Computing KPM moments...") self.pbar.size = total self.pbar.start() elif delta == total: self.pbar.finish() else: self.pbar += delta def kpm(model, energy_range=None, kernel="default", num_threads="auto", silent=False, **kwargs): """The default CPU implementation of the Kernel Polynomial Method This implementation works on any system and is well optimized. Parameters ---------- model : Model Model which will provide the Hamiltonian matrix. energy_range : Optional[Tuple[float, float]] KPM needs to know the lowest and highest eigenvalue of the Hamiltonian, before computing the expansion moments. By default, this is determined automatically using a quick Lanczos procedure. 
To override the automatic boundaries pass a `(min_value, max_value)` tuple here. The values can be overestimated, but note that performance drops as the energy range becomes wider. On the other hand, underestimating the range will produce `NaN` values in the results. kernel : Kernel The kernel in the *Kernel* Polynomial Method. Used to improve the quality of the function reconstructed from the Chebyshev series. Possible values are :func:`jackson_kernel` or :func:`lorentz_kernel`. The Jackson kernel is used by default. num_threads : int The number of CPU threads to use for calculations. This is automatically set to the number of logical cores available on the current machine. silent : bool Don't show any progress messages. Returns ------- :class:`~pybinding.chebyshev.KernelPolynomialMethod` """ if kernel != "default": kwargs["kernel"] = kernel if num_threads != "auto": kwargs["num_threads"] = num_threads if "progress_callback" not in kwargs: kwargs["progress_callback"] = _ComputeProgressReporter() if silent: del kwargs["progress_callback"] return KernelPolynomialMethod(_cpp.kpm(model, energy_range or (0, 0), **kwargs)) def kpm_cuda(model, energy_range=None, kernel="default", **kwargs): """Same as :func:`kpm` except that it's executed on the GPU using CUDA (if supported) See :func:`kpm` for detailed parameter documentation. This method is only available if the C++ extension module was compiled with CUDA. Parameters ---------- model : Model energy_range : Optional[Tuple[float, float]] kernel : Kernel Returns ------- :class:`~pybinding.chebyshev.KernelPolynomialMethod` """ try: if kernel != "default": kwargs["kernel"] = kernel # noinspection PyUnresolvedReferences return KernelPolynomialMethod(_cpp.kpm_cuda(model, energy_range or (0, 0), **kwargs)) except AttributeError: raise Exception("The module was compiled without CUDA support.\n" "Use a different KPM implementation or recompile the module with CUDA.") def jackson_kernel(): """The Jackson kernel -- a good general-purpose kernel, appropriate for most applications Imposes Gaussian broadening `sigma = pi / N` where `N` is the number of moments. The broadening value is user-defined for each function calculation (LDOS, Green's, etc.). The number of moments is then determined based on the broadening -- it's not directly set by the user. """ return _cpp.jackson_kernel() def lorentz_kernel(lambda_value=4.0): """The Lorentz kernel -- best for Green's function This kernel is most appropriate for the expansion of the Green’s function because it most closely mimics the divergences near the true eigenvalues of the Hamiltonian. The Lorentzian broadening is given by `epsilon = lambda / N` where `N` is the number of moments. Parameters ---------- lambda_value : float May be used to fine-tune the smoothness of the convergence. Usual values are between 3 and 5. Lower values will speed up the calculation at the cost of accuracy. If in doubt, leave it at the default value of 4. """ return _cpp.lorentz_kernel(lambda_value) def dirichlet_kernel(): """The Dirichlet kernel -- returns raw moments, least favorable choice This kernel doesn't modify the moments at all. The resulting moments represent just a truncated series which results in lots of oscillation in the reconstructed function. Therefore, this kernel should almost never be used. It's only here in case the raw moment values are needed for some other purpose. 
Note that `required_num_moments()` returns `N = pi / sigma` for compatibility with the Jackson kernel, but there is no actual broadening associated with the Dirichlet kernel. """ return _cpp.dirichlet_kernel() class _PythonImpl: """Basic Python/SciPy implementation of KPM""" def __init__(self, model, energy_range, kernel, **_): self.model = model self.energy_range = energy_range self.kernel = kernel self._stats = {} @property def stats(self): class AttrDict(dict): """Allows dict items to be retrieved as attributes: d["item"] == d.item""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.__dict__ = self s = AttrDict(self._stats) s.update({k: v.elapsed for k, v in s.items() if "_time" in k}) s["eps"] = s["nnz"] / s["moments_time"] return s def _scaling_factors(self): """Compute the energy bounds of the model and return the appropriate KPM scaling factors""" def find_bounds(): if self.energy_range[0] != self.energy_range[1]: return self.energy_range from scipy.sparse.linalg import eigsh h = self.model.hamiltonian self.energy_range = [eigsh(h, which=x, k=1, tol=2e-3, return_eigenvectors=False)[0] for x in ("SA", "LA")] return self.energy_range with timed() as self._stats["bounds_time"]: emin, emax = find_bounds() self._stats["energy_min"] = emin self._stats["energy_max"] = emax tolerance = 0.01 a = 0.5 * (emax - emin) * (1 + tolerance) b = 0.5 * (emax + emin) return a, b def _rescale_hamiltonian(self, h, a, b): size = h.shape[0] with timed() as self._stats["rescale_time"]: return (h - b * scipy.sparse.eye(size)) * (2 / a) def _compute_diagonal_moments(self, num_moments, starter, h2): """Procedure for computing KPM moments when the two vectors are identical""" r0 = starter.copy() r1 = h2.dot(r0) * 0.5 moments = np.zeros(num_moments, dtype=h2.dtype) moments[0] = np.vdot(r0, r0) * 0.5 moments[1] = np.vdot(r1, r0) for n in range(1, num_moments // 2): r0 = h2.dot(r1) - r0 r0, r1 = r1, r0 moments[2 * n] = 2 * (np.vdot(r0, r0) - moments[0]) moments[2 * n + 1] = 2 * np.vdot(r1, r0) - moments[1] self._stats["num_moments"] = num_moments self._stats["nnz"] = h2.nnz * num_moments / 2 self._stats["vector_memory"] = r0.nbytes + r1.nbytes self._stats["matrix_memory"] = (h2.data.nbytes + h2.indices.nbytes + h2.indptr.nbytes if isinstance(h2, scipy.sparse.csr_matrix) else 0) return moments @staticmethod def _exval_starter(h2, index): """Initial vector for the expectation value procedure""" r0 = np.zeros(h2.shape[0], dtype=h2.dtype) r0[index] = 1 return r0 @staticmethod def _reconstruct_real(moments, energy, a, b): """Reconstruct a real function from KPM moments""" scaled_energy = (energy - b) / a ns = np.arange(moments.size) k = 2 / (a * np.pi) return np.array([k / np.sqrt(1 - w**2) * np.sum(moments.real * np.cos(ns * np.arccos(w))) for w in scaled_energy]) def _ldos(self, index, energy, broadening): """Calculate the LDOS at the given Hamiltonian index""" a, b = self._scaling_factors() num_moments = self.kernel.required_num_moments(broadening / a) h2 = self._rescale_hamiltonian(self.model.hamiltonian, a, b) starter = self._exval_starter(h2, index) with timed() as self._stats["moments_time"]: moments = self._compute_diagonal_moments(num_moments, starter, h2) with timed() as self._stats["reconstruct_time"]: moments *= self.kernel.damping_coefficients(num_moments) return self._reconstruct_real(moments, energy, a, b) def calc_ldos(self, energy, broadening, position, sublattice="", reduce=True): """Calculate the LDOS at the given position/sublattice""" with timed() as 
self._stats["total_time"]: system_index = self.model.system.find_nearest(position, sublattice) ham_idx = self.model.system.to_hamiltonian_indices(system_index) result_data = np.array([self._ldos(i, energy, broadening) for i in ham_idx]).T if reduce: return np.sum(result_data, axis=1) else: return result_data def report(self, *_): from .utils import with_suffix, pretty_duration stats = self.stats.copy() stats.update({k: with_suffix(stats[k]) for k in ("num_moments", "eps")}) stats.update({k: pretty_duration(v) for k, v in stats.items() if "_time" in k}) fmt = " ".join([ "{energy_min:.2f}, {energy_max:.2f} [{bounds_time}]", "[{rescale_time}]", "{num_moments} @ {eps}eps [{moments_time}]", "[{reconstruct_time}]", "| {total_time}" ]) return fmt.format_map(stats) def _kpm_python(model, energy_range=None, kernel="default", **kwargs): """Basic Python/SciPy implementation of KPM""" if kernel == "default": kernel = jackson_kernel() return KernelPolynomialMethod(_PythonImpl(model, energy_range or (0, 0), kernel, **kwargs))
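# -----------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module). It shows how the
# KPM interface defined above is typically driven. The graphene lattice and the
# 20 nm rectangle are assumptions borrowed from the pybinding examples; any
# valid `Model` works the same way.
#
#   import numpy as np
#   import pybinding as pb
#   from pybinding.repository import graphene
#
#   model = pb.Model(graphene.monolayer(), pb.rectangle(20))
#   kpm = pb.kpm(model)  # default CPU implementation defined in this module
#
#   # Density of states: `broadening` (eV) determines the number of expansion
#   # moments through the kernel (Jackson by default, see jackson_kernel).
#   dos = kpm.calc_dos(energy=np.linspace(-9, 9, 500), broadening=0.05,
#                      num_random=16)
#
#   # Local density of states at the site closest to the given position.
#   ldos = kpm.calc_ldos(energy=np.linspace(-9, 9, 500), broadening=0.05,
#                        position=[0, 0])
# -----------------------------------------------------------------------------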
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2014 Cloudwatt # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Rudra Rugge from vnc_api.vnc_api import * from config_db import * from agent import Agent class PortTupleAgent(Agent): def __init__(self, svc_mon, vnc_lib, cassandra, config_section, logger): super(PortTupleAgent, self).__init__(svc_mon, vnc_lib, cassandra, config_section) self._logger = logger def handle_service_type(self): return 'port-tuple' def _allocate_iip_for_family(self, iip_family, si, port, vmi): create_iip = True update_vmi = False iip_name = si.uuid + '-' + port['type'] + '-' + iip_family for iip_id in si.instance_ips: iip = InstanceIpSM.get(iip_id) if iip and iip.name == iip_name: create_iip = False iip_id = iip.uuid if iip.uuid not in vmi.instance_ips: update_vmi = True break if create_iip: iip_obj = InstanceIp(name=iip_name, instance_ip_family=iip_family) vn_obj = self._vnc_lib.virtual_network_read(id=vmi.virtual_network) iip_obj.add_virtual_network(vn_obj) iip_obj.set_service_instance_ip(True) iip_obj.set_instance_ip_secondary(True) iip_obj.set_instance_ip_mode(si.ha_mode) try: self._vnc_lib.instance_ip_create(iip_obj) self._vnc_lib.ref_relax_for_delete(iip_id, vn_obj.uuid) except RefsExistError: self._vnc_lib.instance_ip_update(iip_obj) except Exception as e: return iip_id = iip_obj.uuid tag = ServiceInterfaceTag(interface_type=port['type']) self._vnc_lib.ref_update('service-instance', si.uuid, 'instance-ip', iip_id, None, 'ADD', tag) InstanceIpSM.locate(iip_id) si.update() if create_iip or update_vmi: self._vnc_lib.ref_update('instance-ip', iip_id, 'virtual-machine-interface', vmi.uuid, None, 'ADD') self._vnc_lib.ref_relax_for_delete(iip_id, vmi.uuid) vmi.update() return def _allocate_shared_iip(self, si, port, vmi, vmi_obj): self._allocate_iip_for_family('v4', si, port, vmi) self._allocate_iip_for_family('v6', si, port, vmi) return def set_port_service_health_check(self, port, vmi): if port['service-health-check'] and not vmi.service_health_check: self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid, 'service-health-check', port['service-health-check'], None, 'ADD') vmi.update() def set_port_static_routes(self, port, vmi): if port['interface-route-table'] and not vmi.interface_route_table: self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid, 'interface-route-table', port['interface-route-table'], None, 'ADD') vmi.update() def set_secondary_ip_tracking_ip(self, vmi): for iip_id in vmi.instance_ips: iip = InstanceIpSM.get(iip_id) if not iip or not iip.instance_ip_secondary: continue if iip.secondary_tracking_ip == vmi.aaps[0]['ip']: continue iip_obj = self._vnc_lib.instance_ip_read(id=iip.uuid) iip_obj.set_secondary_ip_tracking_ip(vmi.aaps[0]['ip']) self._vnc_lib.instance_ip_update(iip_obj) iip.update(iip_obj.serialize_to_json()) def set_port_allowed_address_pairs(self, port, vmi, vmi_obj): if not port['allowed-address-pairs']: return aaps = port['allowed-address-pairs'].get('allowed_address_pair', None) if not 
aaps: return update_aap = False if len(aaps) != len(vmi.aaps or []): update_aap = True else: for idx in range(0, len(vmi.aaps)): if vmi.aaps[idx]['ip'] != aaps[idx]['ip']: update_aap = True break if update_aap: vmi_obj.set_virtual_machine_interface_allowed_address_pairs( port['allowed-address-pairs']) self._vnc_lib.virtual_machine_interface_update(vmi_obj) vmi.update() self.set_secondary_ip_tracking_ip(vmi) def delete_shared_iip(self, iip): if not iip.service_instance_ip or not iip.instance_ip_secondary: return if iip.service_instance: return for vmi_id in iip.virtual_machine_interfaces: self._vnc_lib.ref_update('instance-ip', iip.uuid, 'virtual-machine-interface', vmi_id, None, 'DELETE') try: self._vnc_lib.instance_ip_delete(id=iip.uuid) InstanceIpSM.delete(iip.uuid) except NoIdError: return def delete_old_vmi_links(self, vmi): for iip_id in list(vmi.instance_ips): iip = InstanceIpSM.get(iip_id) if not iip or not iip.service_instance: continue self._vnc_lib.ref_update('instance-ip', iip_id, 'virtual-machine-interface', vmi.uuid, None, 'DELETE') vmi.instance_ips.remove(iip_id) irt = InterfaceRouteTableSM.get(vmi.interface_route_table) if irt and irt.service_instance: self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid, 'interface-route-table', irt.uuid, None, 'DELETE') vmi.interface_route_table = None health = ServiceHealthCheckSM.get(vmi.service_health_check) if health and health.service_instance: self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid, 'service-health-check', health.uuid, None, 'DELETE') vmi.service_health_check = None def set_port_service_chain_ip(self, si, port, vmi, vmi_obj): self._allocate_shared_iip(si, port, vmi, vmi_obj) def get_port_config(self, st, si): st_if_list = st.params.get('interface_type', []) si_if_list = si.params.get('interface_list', []) port_config = {} for index in range(0, len(st_if_list)): try: si_if = si_if_list[index] st_if = st_if_list[index] except IndexError: continue port = {} port['type'] = st_if.get('service_interface_type') port['shared-ip'] = st_if.get('shared_ip') port['static-route-enable'] = st_if.get('static_route_enable') port['allowed-address-pairs'] = si_if.get('allowed_address_pairs') port['interface-route-table'] = None for irt_id in si.interface_route_tables: irt = InterfaceRouteTableSM.get(irt_id) if irt and irt.service_interface_tag == port['type']: port['interface-route-table'] = irt.uuid break port['service-health-check'] = None for health_id in si.service_health_checks: health = ServiceHealthCheckSM.get(health_id) if health and health.service_interface_tag == port['type']: port['service-health-check'] = health.uuid break port_config[st_if.get('service_interface_type')] = port return port_config def update_port_tuple(self, vmi): if not vmi.port_tuple: self.delete_old_vmi_links(vmi) return pt = PortTupleSM.get(vmi.port_tuple) if not pt: return si = ServiceInstanceSM.get(pt.parent_key) if not si: return st = ServiceTemplateSM.get(si.service_template) port_config = self.get_port_config(st, si) if not port_config: return for vmi_id in pt.virtual_machine_interfaces: vmi = VirtualMachineInterfaceSM.get(vmi_id) if not vmi: continue if not vmi.params: continue port = port_config[vmi.params.get('service_interface_type')] if not port: continue vmi_obj = VirtualMachineInterface(fq_name=vmi.fq_name, name=vmi.name, parent_type='project') vmi_obj.uuid = vmi.uuid self.set_port_service_chain_ip(si, port, vmi, vmi_obj) self.set_port_allowed_address_pairs(port, vmi, vmi_obj) self.set_port_service_health_check(port, vmi) 
self.set_port_static_routes(port, vmi) def update_port_tuples(self): for si in ServiceInstanceSM.values(): for pt_id in si.port_tuples: self.update_port_tuple(pt_id) for iip in InstanceIpSM.values(): self.delete_shared_iip(iip)
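# -----------------------------------------------------------------------------
# Illustrative sketch, not used by the agent: the shape of the per-interface
# dict returned by get_port_config() above. The 'left'/'right' interface types
# and all values below are hypothetical examples; the real keys come from the
# service template's service_interface_type entries.
_EXAMPLE_PORT_CONFIG = {
    'left': {
        'type': 'left',
        'shared-ip': True,
        'static-route-enable': False,
        'allowed-address-pairs': None,
        'interface-route-table': None,   # or an InterfaceRouteTable UUID
        'service-health-check': None,    # or a ServiceHealthCheck UUID
    },
    'right': {
        'type': 'right',
        'shared-ip': True,
        'static-route-enable': True,
        'allowed-address-pairs': None,
        'interface-route-table': None,
        'service-health-check': None,
    },
}
# update_port_tuple() looks up each VMI's service_interface_type in this dict
# to drive shared instance-IP allocation, allowed address pairs, static routes
# and health checks for that port.
# -----------------------------------------------------------------------------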
Are you looking for a trustworthy company to clean the upholstery in your South Argyle, New York home? You've just found a local favorite. TCG Carpet Cleaning is proud to provide upholstery cleaning in South Argyle. Regular upholstery cleaning will improve the air quality and beauty of your home or office. You deserve clean, stain-free, and odor-free upholstery, especially when it comes at a price you can afford. When lodged-in dust combines with pet dander, food particles, or pollen, the result is an abrasive grit that can damage your fibers and make cleaning your South Argyle upholstery a necessity. TCG Carpet Cleaning can take the headache out of upholstery cleaning and leave your rooms fresh and clean. TCG Carpet Cleaning recommends cleaning your upholstery at least once each year, and 2-3 times yearly if you have small children or pets, do a lot of cooking, frequently use a fireplace or wood stove, or smoke in your home or business. Ask about our regular upholstery maintenance programs and learn how you can save money. When it comes to fast, thorough upholstery cleaners in South Argyle, no one beats TCG Carpet Cleaning's trained, experienced technicians and professional, friendly upholstery cleaning service. Call today for a free quote or to schedule service: (518) 682-6008.
import copy from nodes import Node class DeepFor(Node): char = ".F" args = None results = None default_arg = 1 def __init__(self, args: Node.NumericLiteral, ast:Node.EvalLiteral): self.args = args self.ast = ast if self.ast.nodes == []: self.ast.add_node(b"\n") @Node.test_func([[[[0], 1, 2, 3], [4, 5, 6, 7]]], [[[[2], 4, 6, 8], [10, 12, 14, 16]]], "h}") @Node.test_func([[1, [[2, 3, [4], 5], 6], 7]], [[2, [[2, 4, [4], 6], 6], 8]], "D 2%+") def func(self, *args): """Deeply run a for loop across a nD tree. Takes a list or tuple with a varying depth. Returns a list with the same depth all round with the function applied.""" seq, *args = copy.deepcopy(args) assert(isinstance(seq, Node.sequence)) self.type = None self.shared_type = False rtn = self.recurse(seq, args) if self.type is None or self.shared_type: return [rtn] return [self.recurse(seq, args, run_func=self.cleanup)] def recurse(self, seq, args, run_func=None): not_overwritten = run_func is None if not_overwritten: run_func = self.run rtn = [] for i in seq: if isinstance(i, Node.sequence): if not_overwritten: rtn.append(self.recurse(i, args)) else: rtn.append(self.recurse(i, args, run_func)) else: rtn.append(run_func(i, args)) if not_overwritten: self.get_type(rtn[-1]) return rtn def run(self, obj, args): rtn = self.ast.run([obj]+args) if len(rtn) == 1: rtn = rtn[0] return rtn def cleanup(self, obj, args): obj = self.run(obj, args) if obj: return obj else: return self.type def get_type(self, obj): if obj: rtn_type = {str: "", int: 0, list: [], dict: {}, tuple: (), set: set(), bool: False}.get(type(obj), None) if self.type is None: self.type = rtn_type elif self.type == rtn_type: pass else: self.shared_type = True return obj
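# -----------------------------------------------------------------------------
# Standalone sketch of the pattern DeepFor.func implements: a recursive "deep
# map" over an arbitrarily nested sequence that applies a function to every
# leaf while preserving the nesting. `deep_map` is a plain illustration with an
# ordinary callable in place of a compiled AST; it is not used by the node.
def deep_map(func, seq):
    """Apply `func` to every non-sequence leaf of a nested list/tuple."""
    return [deep_map(func, item) if isinstance(item, (list, tuple))
            else func(item)
            for item in seq]

# Mirrors the input/output of the first test case above:
#   deep_map(lambda x: (x + 1) * 2, [[[0], 1, 2, 3], [4, 5, 6, 7]])
#   -> [[[2], 4, 6, 8], [10, 12, 14, 16]]
# -----------------------------------------------------------------------------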
A rarely-sprouting bush which yields the ambrosia fruit. Ambrosia is pleasurable to eat and produces a slightly addictive chemical warmth effect. For the food and social drug, see Ambrosia. The ambrosia bush is an exotic plant in RimWorld that occurs naturally only through the ambrosia sprout event and therefore can't be grown by the player. A mature ambrosia bush yields 4 pieces of ambrosia fruit.
# Copyright 2017 Janos Czentye, Balazs Nemeth, Balazs Sonkoly # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Abstract class and implementation for basic operations with a single NF-FG, such as building, parsing, processing NF-FG, helper functions, etc. """ import copy import itertools import json import logging import math import pprint import re from collections import defaultdict, OrderedDict from copy import deepcopy import networkx as nx from networkx.exception import NetworkXError from .nffg_elements import (Node, NodeNF, NodeInfra, NodeResource, NodeSAP, Link, EdgeSGLink, EdgeLink, EdgeReq, Port, Flowrule, NFFGModel, Element) VERSION = "1.0" VERBOSE = 5 class AbstractNFFG(object): """ Abstract class for managing single NF-FG data structure. The NF-FG data model is described in YANG. This class provides the interfaces with the high level data manipulation functions. """ __slots__ = () # Default domain value DEFAULT_DOMAIN = NodeInfra.DEFAULT_DOMAIN """Default domain value""" # Infra types TYPE_INFRA_SDN_SW = NodeInfra.TYPE_SDN_SWITCH TYPE_INFRA_EE = NodeInfra.TYPE_EE TYPE_INFRA_STATIC_EE = NodeInfra.TYPE_STATIC_EE TYPE_INFRA_BISBIS = NodeInfra.TYPE_BISBIS # Node types TYPE_INFRA = Node.INFRA TYPE_NF = Node.NF TYPE_SAP = Node.SAP # Link types TYPE_LINK_STATIC = Link.STATIC TYPE_LINK_DYNAMIC = Link.DYNAMIC TYPE_LINK_SG = Link.SG TYPE_LINK_REQUIREMENT = Link.REQUIREMENT # Port constants PORT_ROLE_CONSUMER = Port.ROLE_CONSUMER PORT_ROLE_PROVIDER = Port.ROLE_PROVIDER # Mapping mode operations MODE_ADD = "ADD" MODE_DEL = "DELETE" MODE_REMAP = "REMAP" # Element operation OP_CREATE = Element.OP_CREATE OP_REPLACE = Element.OP_REPLACE OP_MERGE = Element.OP_MERGE OP_REMOVE = Element.OP_REMOVE OP_DELETE = Element.OP_DELETE # Element status STATUS_INIT = Element.STATUS_INIT STATUS_PENDING = Element.STATUS_PENDING STATUS_DEPLOY = Element.STATUS_DEPLOY STATUS_RUN = Element.STATUS_RUN STATUS_STOP = Element.STATUS_STOP STATUS_FAIL = Element.STATUS_FAIL # Mapping process status MAP_STATUS_SKIPPED = "SKIPPED" # mark NFFG as skipped for ESCAPE version = VERSION ############################################################################## # NFFG specific functions ############################################################################## def add_nf (self): """ Add a single NF node to the NF-FG. """ raise NotImplementedError def add_sap (self): """ Add a single SAP node to the NF-FG. """ raise NotImplementedError def add_infra (self): """ Add a single infrastructure node to the NF-FG. """ raise NotImplementedError def add_link (self, src, dst): """ Add a static or dynamic infrastructure link to the NF-FG. :param src: source port :param dst: destination port """ raise NotImplementedError def add_sglink (self, src, dst): """ Add an SG link to the NF-FG. :param src: source port :param dst: destination port """ raise NotImplementedError def add_req (self, src, dst): """ Add a requirement link to the NF-FG. 
:param src: source port :param dst: destination port """ raise NotImplementedError def add_node (self, node): """ Add a single node to the NF-FG. :param node: node object """ raise NotImplementedError def del_node (self, id): """ Remove a single node from the NF-FG. :param id: id of the node """ raise NotImplementedError def add_edge (self, src, dst, link): """ Add an edge to the NF-FG. :param src: source port :param dst: destination port :param link: link object """ raise NotImplementedError def del_edge (self, src, dst): """ Remove an edge from the NF-FG. :param src: source port :param dst: destination port """ raise NotImplementedError ############################################################################## # General functions for create/parse/dump/convert NFFG ############################################################################## @classmethod def parse (cls, data): """ General function for parsing data as a new :any::`NFFG` object and return with its reference. :param data: raw data :type data: str :return: parsed NFFG as an XML object :rtype: :class:`Virtualizer` """ raise NotImplementedError def dump (self): """ General function for dumping :any::`NFFG` according to its format to plain text. :return: plain text representation :rtype: str """ raise NotImplementedError class NFFG(AbstractNFFG): """ Internal NFFG representation based on networkx. """ __slots__ = ('network', 'id', 'name', 'service_id', 'metadata', 'mode', 'status', 'version') def __init__ (self, id=None, name=None, service_id=None, mode=None, metadata=None, status=None, version=VERSION): """ Init. :param id: optional NF-FG identifier (generated by default) :type id: str or int :param name: optional NF-FG name (generated by default) :type name: str :param service_id: service id this NFFG is originated from :type service_id: str or int :param mode: describe how to handle the defined elements (default: ADD) :type mode: str :param metadata: optional metadata for NFFG :type metadata: dict :param status: optional info for NFFG :type status: str :param version: optional version (default: 1.0) :type version: str :return: None """ super(NFFG, self).__init__() self.network = nx.MultiDiGraph() self.id = str(id) if id is not None else Element.generate_unique_id() self.name = name self.service_id = service_id self.metadata = OrderedDict(metadata if metadata else ()) self.mode = mode self.status = status self.version = version ############################################################################## # Element iterators ############################################################################## @property def nfs (self): """ Iterate over the NF nodes. :return: iterator of NFs :rtype: collections.Iterator """ return (node for id, node in self.network.nodes_iter(data=True) if node.type == Node.NF) @property def saps (self): """ Iterate over the SAP nodes. :return: iterator of SAPs :rtype: collections.Iterator """ return (node for id, node in self.network.nodes_iter(data=True) if node.type == Node.SAP) @property def infras (self): """ Iterate over the Infra nodes. :return: iterator of Infra node :rtype: collections.Iterator """ return (node for id, node in self.network.nodes_iter(data=True) if node.type == Node.INFRA) @property def links (self): """ Iterate over the link edges. 
:return: iterator of edges :rtype: collections.Iterator """ return (link for src, dst, link in self.network.edges_iter(data=True) if link.type == Link.STATIC or link.type == Link.DYNAMIC) @property def sg_hops (self): """ Iterate over the service graph hops. :return: iterator of SG edges :rtype: collections.Iterator """ return (link for s, d, link in self.network.edges_iter(data=True) if link.type == Link.SG) @property def reqs (self): """ Iterate over the requirement edges. :return: iterator of requirement edges :rtype: collections.Iterator """ return (link for s, d, link in self.network.edges_iter(data=True) if link.type == Link.REQUIREMENT) ############################################################################## # Magic functions mostly for dict specific behaviour ############################################################################## def __str__ (self): """ Return the string representation. :return: string representation :rtype: str """ return "NFFG(id=%s name=%s, version=%s)" % ( self.id, self.name, self.version) def __contains__ (self, item): """ Return True if item exist in the NFFG, False otherwise. :param item: node object or id :type item: :any:`Node` or str :return: item is in the NFFG :rtype: bool """ if isinstance(item, Node): item = item.id return item in self.network def __iter__ (self, data=False): """ Return an iterator over the nodes. :param data: If True return a two-tuple of node and node data dictionary :type data: bool :return: An iterator over nodes. """ return self.network.nodes_iter(data=data) def __len__ (self): """ Return the number of nodes. :return: number of nodes :rtype: int """ return len(self.network) def __getitem__ (self, item): """ Return the object given by the id: item. :param item: node id :return: node object """ return self.network.node[item] ############################################################################## # Builder design pattern related functions ############################################################################## def add_node (self, node): """ Add a Node to the structure. :param node: a Node object :type node: :any:`Node` :return: None """ self.network.add_node(node.id) self.network.node[node.id] = node def del_node (self, node): """ Remove the node from the structure. :param node: node id or node object or a port object of the node :type node: str or :any:`Node` or :any`Port` :return: the actual node is found and removed or not :rtype: bool """ try: if isinstance(node, Node): node = node.id elif isinstance(node, Port): node = node.node.id self.network.remove_node(node) return True except NetworkXError: # There was no node in the graph return False def add_edge (self, src, dst, link): """ Add an Edge to the structure. :param src: source node id or Node object or a Port object :type src: str or :any:`Node` or :any`Port` :param dst: destination node id or Node object or a Port object :type dst: str or :any:`Node` or :any`Port` :param link: edge data object :type link: :any:`Link` :return: None """ if isinstance(src, Node): src = src.id elif isinstance(src, Port): src = src.node.id if isinstance(dst, Node): dst = dst.id elif isinstance(dst, Port): dst = dst.node.id self.network.add_edge(src, dst, key=link.id) self.network[src][dst][link.id] = link def del_edge (self, src, dst, id=None): """ Remove the edge(s) between two nodes. 
:param src: source node id or Node object or a Port object :type src: str or :any:`Node` or :any`Port` :param dst: destination node id or Node object or a Port object :type dst: str or :any:`Node` or :any`Port` :param id: unique id of the edge (otherwise remove all) :type id: str or int :return: the actual node is found and removed or not :rtype: bool """ try: if isinstance(src, Node): src = src.id elif isinstance(src, Port): src = src.node.id if isinstance(dst, Node): dst = dst.id elif isinstance(dst, Port): dst = dst.node.id if id is not None: self.network.remove_edge(src, dst, key=id) else: self.network[src][dst].clear() return True except NetworkXError: # There was no node in the graph return False def add_nf (self, nf=None, id=None, name=None, func_type=None, dep_type=None, cpu=None, mem=None, storage=None, cost=None, delay=None, bandwidth=None): """ Add a Network Function to the structure. :param nf: add this explicit NF object instead of create one :type nf: :any:`NodeNF` :param id: optional id :type id: str or ints :param name: optional name :type name: str :param func_type: functional type (default: "None") :type func_type: str :param dep_type: deployment type (default: "None") :type dep_type: str :param cpu: CPU resource :type cpu: float :param mem: memory resource :type mem: float :param storage: storage resource :type storage: float :type cost: float :param cost: NF cost deployement limit. :param delay: delay property of the Node :type delay: float :param bandwidth: bandwidth property of the Node :type bandwidth: float :return: newly created node :rtype: :any:`NodeNF` """ if nf is None: if any(i is not None for i in (cpu, mem, storage, delay, bandwidth)): res = NodeResource(cpu=cpu, mem=mem, storage=storage, delay=delay, bandwidth=bandwidth, cost=cost) else: res = None nf = NodeNF(id=id, name=name, func_type=func_type, dep_type=dep_type, res=res) self.add_node(nf) return nf def add_sap (self, sap_obj=None, id=None, name=None, binding=None, sap=None, technology=None, delay=None, bandwidth=None, cost=None, controller=None, orchestrator=None, l2=None, l4=None, metadata=None): """ Add a Service Access Point to the structure. :param sap_obj: add this explicit SAP object instead of create one :type sap_obj: :any:`NodeSAP` :param id: optional id :type id: str or int :param name: optional name :type name: str :param binding: interface binding :type binding: str :param sap: inter-domain SAP identifier :type sap: str :param technology: technology :type technology: str :param delay: delay :type delay: float :param bandwidth: bandwidth :type bandwidth: float :param cost: cost :type cost: str :param controller: controller :type controller: str :param orchestrator: orchestrator :type orchestrator: str :param l2: l2 :param l2: str :param l4: l4 :type l4: str :param metadata: metadata related to Node :type metadata: dict :return: newly created node :rtype: :any:`NodeSAP` """ if sap_obj is None: sap_obj = NodeSAP(id=id, name=name, binding=binding, metadata=metadata) self.add_node(sap_obj) return sap_obj def add_infra (self, infra=None, id=None, name=None, domain=None, infra_type=None, cpu=None, mem=None, storage=None, cost=None, zone=None, delay=None, bandwidth=None): """ Add an Infrastructure Node to the structure. 
:param infra: add this explicit Infra object instead of create one :type infra: :any:`NodeInfra` :param id: optional id :type id: str or int :param name: optional name :type name: str :param domain: domain of the Infrastructure Node (default: None) :type domain: str :param infra_type: type of the Infrastructure Node (default: 0) :type infra_type: int or str :param cpu: CPU resource :type cpu: float :param mem: memory resource :type mem: float :param storage: storage resource :type storage: float :param cost: cost :type cost: str :param zone: zone :type zone: str :param delay: delay property of the Node :type delay: float :param bandwidth: bandwidth property of the Node :type bandwidth: float :return: newly created node :rtype: :any:`NodeInfra` """ if infra is None: if any(i is not None for i in (cpu, mem, storage, delay, bandwidth)): res = NodeResource(cpu=cpu, mem=mem, storage=storage, cost=cost, zone=zone, bandwidth=bandwidth, delay=delay) else: res = None infra = NodeInfra(id=id, name=name, domain=domain, infra_type=infra_type, res=res) self.add_node(infra) return infra def add_link (self, src_port, dst_port, link=None, id=None, dynamic=False, backward=False, delay=None, bandwidth=None, cost=None, qos=None): """ Add a Link to the structure. :param link: add this explicit Link object instead of create one :type link: :any:`EdgeLink` :param src_port: source port :type src_port: :any:`Port` :param dst_port: destination port :type dst_port: :any:`Port` :param id: optional link id :type id: str or int :param backward: the link is a backward link compared to an another Link :type backward: bool :param delay: delay resource :type delay: float :param dynamic: set the link dynamic (default: False) :type dynamic: bool :param bandwidth: bandwidth resource :type bandwidth: float :param cost: cost :type cost: str :param qos: traffic QoS class :type qos: str :return: newly created edge :rtype: :any:`EdgeLink` """ if link is None: type = Link.DYNAMIC if dynamic else Link.STATIC link = EdgeLink(src=src_port, dst=dst_port, type=type, id=id, backward=backward, delay=delay, bandwidth=bandwidth, cost=cost, qos=qos) else: link.src, link.dst = src_port, dst_port self.add_edge(src_port.node, dst_port.node, link) return link def add_undirected_link (self, port1, port2, p1p2id=None, p2p1id=None, dynamic=False, delay=None, bandwidth=None, cost=None, qos=None): """ Add two Links to the structure, in both directions. 
:param port1: source port :type port1: :any:`Port` :param port2: destination port :type port2: :any:`Port` :param p1p2id: optional link id from port1 to port2 :type p1p2id: str or int :param p2p1id: optional link id from port2 to port1 :type p2p1id: str or int :param delay: delay resource of both links :type delay: float :param dynamic: set the link dynamic (default: False) :type dynamic: bool :param bandwidth: bandwidth resource of both links :type bandwidth: float :param cost: cost :type cost: str :param qos: traffic QoS class :type qos: str :return: newly created edge tuple in (p1->p2, p2->p1) :rtype: :any:(`EdgeLink`, `EdgeLink`) """ p1p2Link = self.add_link(port1, port2, id=p1p2id, dynamic=dynamic, backward=False, delay=delay, bandwidth=bandwidth, cost=cost, qos=qos) p2p1Link = self.add_link(port2, port1, id=p2p1id, dynamic=dynamic, backward=True, delay=delay, bandwidth=bandwidth, cost=cost, qos=qos) return p1p2Link, p2p1Link def add_sglink (self, src_port, dst_port, hop=None, id=None, flowclass=None, tag_info=None, delay=None, bandwidth=None, constraints=None, additional_actions=None): """ Add a SG next hop edge to the structure. :param hop: add this explicit SG Link object instead of create one :type hop: :any:`EdgeSGLink` :param src_port: source port :type src_port: :any:`Port` :param dst_port: destination port :type dst_port: :any:`Port` :param id: optional link id :type id: str or int :param flowclass: flowclass of SG next hop link :type flowclass: str :param tag_info: tag info :type tag_info: str :param delay: delay requested on link :type delay: float :param bandwidth: bandwidth requested on link :type bandwidth: float :param constraints: optional Constraints object :type constraints: :class:`Constraints` :param additional_actions: additional actions :type additional_actions: str :return: newly created edge :rtype: :any:`EdgeSGLink` """ if hop is None: hop = EdgeSGLink(src=src_port, dst=dst_port, id=id, flowclass=flowclass, tag_info=tag_info, bandwidth=bandwidth, delay=delay, constraints=constraints, additional_actions=additional_actions) self.add_edge(src_port.node, dst_port.node, hop) return hop def add_req (self, src_port, dst_port, req=None, id=None, delay=None, bandwidth=None, sg_path=None): """ Add a requirement edge to the structure. :param req: add this explicit Requirement Link object instead of create one :type req: :any:`EdgeReq` :param src_port: source port :type src_port: :any:`Port` :param dst_port: destination port :type dst_port: :any:`Port` :param id: optional link id :type id: str or int :param delay: delay resource :type delay: float :param bandwidth: bandwidth resource :type bandwidth: float :param sg_path: list of ids of sg_links represents end-to-end requirement :type sg_path: list or tuple :return: newly created edge :rtype: :any:`EdgeReq` """ if req is None: req = EdgeReq(src=src_port, dst=dst_port, id=id, delay=delay, bandwidth=bandwidth, sg_path=sg_path) self.add_edge(src_port.node, dst_port.node, req) return req def add_metadata (self, name, value): """ Add metadata with the given `name`. :param name: metadata name :type name: str :param value: metadata value :type value: str :return: the :class:`NFFG` object to allow function chaining :rtype: :class:`NFFG` """ self.metadata[name] = value return self def get_metadata (self, name): """ Return the value of metadata. 
:param name: name of the metadata :type name: str :return: metadata value :rtype: str """ return self.metadata.get(name) def del_metadata (self, name): """ Remove the metadata from the :class:`NFFG`. If no metadata is given all the metadata will be removed. :param name: name of the metadata :type name: str :return: removed metadata or None :rtype: str or None """ if name is None: self.metadata.clear() else: return self.metadata.pop(name, None) def dump (self): """ Convert the NF-FG structure to a NFFGModel format and return the plain text representation. :return: text representation :rtype: str """ # Create the model nffg = NFFGModel(id=self.id, name=self.name, service_id=self.service_id, version=self.version, mode=self.mode, metadata=self.metadata) # Load Infras for infra in self.infras: nffg.node_infras.append(infra) # Load SAPs for sap in self.saps: nffg.node_saps.append(sap) # Load NFs for nf in self.nfs: nffg.node_nfs.append(nf) # Load Links for link in self.links: nffg.edge_links.append(link) # Load SG next hops for hop in self.sg_hops: nffg.edge_sg_nexthops.append(hop) # Load Requirements for req in self.reqs: nffg.edge_reqs.append(req) # Dump return nffg.dump() def dump_to_json (self): """ Return the NF-FG structure in JSON compatible format. :return: NFFG as a valid JSON :rtype: dict """ return json.loads(self.dump()) @classmethod def parse (cls, raw_data): """ Read the given JSON object structure and try to convert to an NF-FG representation as an :class:`NFFG` :param raw_data: raw NF-FG description as a string :type raw_data: str :return: the parsed NF-FG representation :rtype: :class:`NFFG` """ # Parse text model = NFFGModel.parse(raw_data) # Create new NFFG nffg = NFFG(id=model.id, name=model.name, service_id=model.service_id, version=model.version, mode=model.mode, metadata=model.metadata) # Load Infras for infra in model.node_infras: nffg.add_node(infra) # Load SAPs for sap in model.node_saps: nffg.add_node(sap) # Load NFs for nf in model.node_nfs: nffg.add_node(nf) # Load Links for link in model.edge_links: if link.src.node.type == NFFG.TYPE_NF or \ link.dst.node.type == NFFG.TYPE_NF: link.type = str(NFFG.TYPE_LINK_DYNAMIC) nffg.add_edge(link.src.node, link.dst.node, link) # Load SG next hops for hop in model.edge_sg_nexthops: nffg.add_edge(hop.src.node, hop.dst.node, hop) # Load Requirements for req in model.edge_reqs: nffg.add_edge(req.src.node, req.dst.node, req) return nffg @staticmethod def parse_from_file (path): """ Parse NFFG from file given by the path. :param path: file path :type path: str :return: the parsed NF-FG representation :rtype: :class:`NFFG` """ with open(path) as f: return NFFG.parse(f.read()) ############################################################################## # Helper functions ############################################################################## def is_empty (self): """ Return True if the NFFG contains no Node. :return: :class:`NFFG` object is empty or not :rtype: bool """ return len(self.network) == 0 def is_infrastructure (self): """ Return True if the NFFG is an infrastructure view with Infrastructure nodes. :return: the NFFG is an infrastructure view :rtype: bool """ return sum([1 for i in self.infras]) != 0 def is_SBB (self): """ Return True if the topology detected as a trivial SingleBiSBiS view, which consist of only one Infra node with type: ``BiSBiS``. 
:return: SingleBiSBiS or not :rtype: bool """ itype = [i.infra_type for i in self.infras] return len(itype) == 1 and itype.pop() == self.TYPE_INFRA_BISBIS def is_bare (self): """ Return True if the topology does not contain any NF or flowrules need to install or remap. :return: is bare topology or not :rtype: bool """ # If there is no VNF if len([v for v in self.nfs]) == 0: fr_sum = sum([sum(1 for fr in i.ports.flowrules) for i in self.infras]) # And there is no flowrule in the ports if fr_sum == 0: sg_sum = len([sg for sg in self.sg_hops]) # And there is not SG hop if sg_sum == 0: e2e_sum = len([sg for sg in self.reqs]) if e2e_sum == 0: return True return False def is_virtualized (self): """ Return True if the topology contains at least one virtualized BiSBiS node. :return: contains any NF or not :rtype: bool """ return len([i for i in self.infras if i.infra_type not in (self.TYPE_INFRA_SDN_SW, self.TYPE_INFRA_EE, self.TYPE_INFRA_STATIC_EE)]) > 0 def get_stat (self): """ :return: """ return dict(infras=[i.id for i in self.infras], nfs=[n.id for n in self.nfs], saps=[s.id for s in self.saps], sg_hops=[h.id for h in self.sg_hops]) def real_neighbors_iter (self, node): """ Return with an iterator over the id of neighbours of the given Node not counting the SG and E2E requirement links. :param node: examined :any:`Node` id :type node: str or int :return: iterator over the filtered neighbors :rtype: iterator """ return (v for u, v, link in self.network.out_edges_iter(node, data=True) if link.type in (self.TYPE_LINK_STATIC, self.TYPE_LINK_DYNAMIC)) def real_out_edges_iter (self, node): """ Return with an iterator over the out edge data of the given Node not counting the SG and E2E requirement links. :param node: examined :any:`Node` id :type node: str or int :return: iterator over the filtered neighbors (u,v,d) :rtype: iterator """ return (data for data in self.network.out_edges_iter(node, data=True) if data[2].type in (self.TYPE_LINK_STATIC, self.TYPE_LINK_DYNAMIC)) def duplicate_static_links (self): """ Extend the NFFG model with backward links for STATIC links to fit for the orchestration algorithm. STATIC links: infra-infra, infra-sap :return: NF-FG with the duplicated links for function chaining :rtype: :class:`NFFG` """ # Create backward links backwards = [EdgeLink(src=link.dst, dst=link.src, id=str(link.id) + "-back", backward=True, delay=link.delay, bandwidth=link.bandwidth) for u, v, link in self.network.edges_iter(data=True) if link.type == Link.STATIC] # Add backward links to the NetworkX structure in a separate step to # avoid the link reduplication caused by the iterator based for loop for link in backwards: self.add_edge(src=link.src, dst=link.dst, link=link) return self def merge_duplicated_links (self): """ Detect duplicated STATIC links which both are connected to the same Port/Node and have switched source/destination direction to fit for the simplified NFFG dumping. Only leaves one of the links, but that's not defined which one. :return: NF-FG with the filtered links for function chaining :rtype: :class:`NFFG` """ # Collect backward links backwards = [(src, dst, key) for src, dst, key, link in self.network.edges_iter(keys=True, data=True) if ( link.type == Link.STATIC or link.type == Link.DYNAMIC) and link.backward is True] # Delete backwards links for link in backwards: self.network.remove_edge(*link) return self def adjacent_sghops (self, nf_id): """ Returns a list with the outbound or inbound SGHops from an NF. 
:param nf_id: nf node id :type nf_id: :class:`NodeNf` :return: list """ return [sg for sg in self.sg_hops if sg.src.node.id == nf_id or \ sg.dst.node.id == nf_id] def infra_neighbors (self, node_id): """ Return an iterator for the Infra nodes which are neighbours of the given node. :param node_id: infra node :type node_id: :any:`NodeInfra` :return: iterator for the list of Infra nodes """ return (self.network.node[id] for id in self.network.neighbors_iter(node_id) if self.network.node[id].type == Node.INFRA) def running_nfs (self, infra_id): """ Return an iterator for the NodeNFs which are mapped to the given Infra node. :param infra_id: infra node identifier :type infra_id: :any: `NodeInfra` :return: iterator for the currently running NodeNFs """ return (self.network.node[id] for id in self.network.neighbors_iter(infra_id) if self.network.node[id].type == Node.NF) def get_domain_of_nf (self, nf_id): bb = [bb for bb in self.infra_neighbors(nf_id)] return bb.pop().domain if len(bb) == 1 else None def strip (self): """ Remove all NF and Flowrule from NFFG. :return: stripped NFFG :rtype: :class:`NFFG` """ nfs = [nf for nf in self.nfs] for nf in nfs: self.del_node(node=nf) for node in self.infras: for port in node.ports: port.clear_flowrules() def clear_links (self, link_type): """ Remove every specific Link from the NFFG defined by given ``type``. :param link_type: link type defined in :class:`NFFG` :type link_type: str :return: None """ return self.network.remove_edges_from( [(u, v, link.id) for u, v, link in self.network.edges_iter(data=True) if link.type == link_type]) def clear_nodes (self, node_type): """ Remove every specific Node from the NFFG defined by given ``type``. :param node_type: node type defined in :class:`NFFG` :type node_type: str :return: None """ return self.network.remove_nodes_from( [id for id, node in self.network.nodes_iter(data=True) if node.type == node_type]) def copy (self): """ Return the deep copy of the NFFG object. :return: deep copy :rtype: :class:`NFFG` """ # copy = NFFG(id=self.id, name=self.name, version=self.version, # mode=self.mode, metadata=self.metadata.copy(), # status=self.status) # copy.network = self.network.copy() # return copy from copy import deepcopy return deepcopy(self) def calculate_available_link_res (self, sg_hops_to_be_ignored, mode=AbstractNFFG.MODE_ADD): """ Calculates available bandwidth on all the infrastructure links. Stores them in 'availbandwidth' field of the link objects. Modifies the NFFG instance. :param sg_hops_to_be_ignored: container for ID-s which should be ignored :type sg_hops_to_be_ignored: collections.Iterable :param mode: Determines whether the flowrules should be considered. :type mode: str :return: None """ # set availbandwidth to the maximal value for i, j, k, d in self.network.edges_iter(data=True, keys=True): if d.type == 'STATIC': setattr(self.network[i][j][k], 'availbandwidth', d.bandwidth) # subtract the reserved link and internal (inside Infras) bandwidth if mode == self.MODE_ADD: for d in self.infras: for p in d.ports: for fr in p.flowrules: if fr.id not in sg_hops_to_be_ignored and fr.bandwidth is not None: # Flowrules are cumulatively subtracted from the switching # capacity of the node. d.availres['bandwidth'] -= fr.bandwidth if d.availres['bandwidth'] < 0: raise RuntimeError("The node bandwidth of %s got below zero " "during available resource calculation!" 
% d.id) # Get all the mapped paths of all SGHops from the NFFG sg_map = NFFGToolBox.get_all_sghop_info(self, return_paths=True) for sg_hop_id, data in sg_map.iteritems(): src, dst, flowclass, bandwidth, delay, constraints, \ additional_actions, path = data if bandwidth is not None: for link in path: link.availbandwidth -= bandwidth if link.availbandwidth < 0: raise RuntimeError( "The link bandwidth of %s got below zero during" "available resource calculation!" % link.id) def calculate_available_node_res (self, vnfs_to_be_left_in_place=None, mode=AbstractNFFG.MODE_ADD): """ Calculates available computation and networking resources of the nodes of NFFG. Creates a NodeResource instance for each NodeInfra to store the available resources in the 'availres' attribute added by this fucntion. :param vnfs_to_be_left_in_place: NodeNF.id-s to be ignored subtraction. :type vnfs_to_be_left_in_place: dict :param mode: Determines whether the running NFs should be considered. :return: None """ # add available res attribute to all Infras and subtract the running # NFs` resources from the given max res if vnfs_to_be_left_in_place is None: vnfs_to_be_left_in_place = {} for n in self.infras: setattr(self.network.node[n.id], 'availres', copy.deepcopy(self.network.node[n.id].resources)) if mode == self.MODE_ADD: for vnf in self.running_nfs(n.id): # if a VNF needs to be left in place, then it is still mapped by the # mapping process, but with placement criteria, so its resource # requirements will be subtracted during the greedy process. if vnf.id not in vnfs_to_be_left_in_place: try: newres = self.network.node[n.id].availres.subtractNodeRes( self.network.node[vnf.id].resources, self.network.node[n.id].resources) except RuntimeError: raise RuntimeError( "Infra node`s resources are expected to represent its maximal " "capabilities." "The NodeNF(s) running on Infra node %s, use(s)more resource " "than the maximal." % n.id) else: try: newres = self.network.node[n.id].availres.subtractNodeRes( vnfs_to_be_left_in_place[vnf.id].resources, self.network.node[n.id].resources) except RuntimeError: raise RuntimeError("VNF %s cannot be kept on host %s with " "increased resource requirements due to not " "enough available resources!" % (vnf.id, n.id)) self.network.node[n.id].availres = newres def del_flowrules_of_SGHop (self, hop_id_to_del): """ Deletes all flowrules, which belong to a given SGHop ID. Compares based on Flowrule.ID and SGHop.ID they should be identical only for the corresponding Flowrules. :param hop_id_to_del: collection of flowrule ids need to be deleted :type hop_id_to_del: list :return: None """ for n in self.infras: for p in n.ports: for fr in p.flowrules: if fr.id == hop_id_to_del: p.del_flowrule(id=fr.id) class NFFGToolBox(object): """ Helper functions for NFFG handling operations, etc. """ DEFAULT_SBB_ID = "SingleBiSBiS" ############################################################################## # ------------------ Splitting/Merging-related functions --------------------- ############################################################################## @staticmethod def detect_domains (nffg): """ Return with the set of detected domains in the given ``nffg``. :param nffg: observed NFFG :type nffg: :class:`NFFG` :return: set of the detected domains :rtype: set """ return {infra.domain for infra in nffg.infras} @staticmethod def reset_inter_domain_property (nffg, log=logging.getLogger("SAP-recreate")): """ Check infra links and reset inter-domain properties of related ports if needed. 
:param nffg: topology :type nffg: :class:`NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: None """ log.debug("Check inter-domain port properties...") for u, v, link in nffg.network.edges_iter(data=True): # Inter-domain links are given between Infra nodes if not (nffg[u].type == nffg[v].type == NFFG.TYPE_INFRA): continue sport, dport = link.src, link.dst # SAP attributes are note None and the same if sport.sap == dport.sap is not None: if not (sport.has_property('type') and dport.has_property('type')): log.debug("Found unmarked inter-domain link: %s with SAP id: %s" % (link.id, sport.sap)) link.src.add_property(property='type', value='inter-domain') link.dst.add_property(property='type', value='inter-domain') log.debug( "Mark ports as 'inter-domain': %s, %s" % (link.src, link.dst)) @staticmethod def recreate_inter_domain_SAPs (nffg, log=logging.getLogger("SAP-recreate")): """ Search for possible inter-domain ports examining ports' metadata and recreate associated SAPs. :param nffg: observed NFFG :type nffg: :class:`NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: modified NFFG :rtype: :class:`NFFG` """ for infra in nffg.infras: for port in infra.ports: # Check ports of remained Infra's for SAP ports if port.get_property("type") == "inter-domain": # Found inter-domain SAP port log.debug("Found inter-domain SAP port: %s" % port) adj_nodes = [v for u, v, l in nffg.real_out_edges_iter(infra.id) if l.src.id == port.id] if len(adj_nodes) != 0: log.debug("Detected port connects to other node: %s!. Skip..." % adj_nodes) continue # Copy optional SAP metadata as special id or name # Create default SAP object attributes if port.has_property("sap"): sap_id = port.get_property("sap") log.debug("Detected dynamic 'sap' property: %s in port: %s" % (sap_id, port)) elif port.sap is not None: sap_id = port.sap log.debug("Detected static 'sap' value: %s in port: %s" % (sap_id, port)) else: log.warning( "%s is detected as inter-domain port, but 'sap' metadata is not " "found! Using 'name' metadata as fallback..." % port) sap_id = port.get_property("name") if port.has_property('name'): sap_name = port.get_property("name") log.debug('Using dynamic name: %s for inter-domain port' % sap_name) else: sap_name = port.name log.debug('Using static name: %s for inter-domain port' % sap_name) # Add SAP to splitted NFFG if sap_id in nffg: log.warning("%s is already in the splitted NFFG. Skip adding..." % nffg[sap_id]) continue sap = nffg.add_sap(id=sap_id, name=sap_name) # Add port to SAP port number(id) is identical with the Infra's port sap_port = sap.add_port(id=port.id, name=port.name, properties=port.properties.copy(), sap=port.sap, capability=port.capability, technology=port.technology, delay=port.delay, bandwidth=port.bandwidth, cost=port.cost, controller=port.controller, orchestrator=port.orchestrator, l2=port.l2, l4=port.l4, metadata=port.metadata.copy()) for l3 in port.l3: sap_port.l3.append(l3.copy()) # Connect SAP to Infra nffg.add_undirected_link(port1=port, port2=sap_port) log.debug( "Add inter-domain SAP: %s with port: %s" % (sap, sap_port)) return nffg @staticmethod def trim_orphaned_nodes (nffg, domain=None, log=logging.getLogger("TRIM")): """ Remove orphaned nodes from given :class:`NFFG`. 
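A node counts as orphaned if it is not an endpoint of any link in the topology.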
:param nffg: observed NFFG :type nffg: :class:`NFFG` :param domain: domain name :type domain: str :param log: additional logger :type log: :any:`logging.Logger` :return: trimmed NFFG :rtype: :class:`NFFG` """ detected = set() for u, v, link in nffg.network.edges_iter(data=True): detected.add(link.src.node.id) detected.add(link.dst.node.id) orphaned = {n for n in nffg} - detected for node in orphaned: if domain and nffg[node].type == NFFG.TYPE_INFRA and \ nffg[node].domain != domain: log.warning("Found orphaned node: %s! Remove from sliced part." % nffg[node]) nffg.del_node(node) if orphaned: log.debug("Remained nodes: %s" % [n for n in nffg]) return nffg @classmethod def merge_new_domain (cls, base, nffg, log=logging.getLogger("MERGE")): """ Merge the given ``nffg`` into the ``base`` NFFG using the given domain name. :param base: base NFFG object :type base: :class:`NFFG` :param nffg: updating information :type nffg: :class:`NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: the update base NFFG :rtype: :class:`NFFG` """ # Get new domain name domain = cls.detect_domains(nffg=nffg) if len(domain) == 0: log.error("No domain detected in new %s!" % nffg) return if len(domain) > 1: log.warning("Multiple domain name detected in new %s!" % nffg) return # Copy infras log.debug("Merge domain: %s resource info into %s..." % (domain.pop(), base.id)) # Check if the infra with given id is already exist in the base NFFG for infra in nffg.infras: if infra.id not in base: c_infra = base.add_infra(infra=deepcopy(infra)) log.debug("Copy infra node: %s" % c_infra) else: log.warning("Infra node: %s does already exist in %s. Skip adding..." % (infra, base)) # Copy NFs for nf in nffg.nfs: if nf.id not in base: c_nf = base.add_nf(nf=deepcopy(nf)) log.debug("Copy NF node: %s" % c_nf) else: log.warning("NF node: %s does already exist in %s. Skip adding..." % (nf, base)) # Copy SAPs for sap_id in [s.id for s in nffg.saps]: if sap_id in [s.id for s in base.saps]: # Found inter-domain SAP log.debug("Found Inter-domain SAP: %s" % sap_id) # Search outgoing links from SAP, should be only one b_links = [l for u, v, l in base.real_out_edges_iter(sap_id)] if len(b_links) < 1: log.warning( "SAP is not connected to any node! Maybe you forgot to call " "duplicate_static_links?") return elif 1 < len(b_links): log.warning( "Inter-domain SAP should have one and only one connection to the " "domain! Using only the first connection.") continue # Get inter-domain port in base NFFG domain_port_dov = b_links[0].dst sap_port_dov = b_links[0].src log.debug("Found inter-domain port: %s" % domain_port_dov) # Search outgoing links from SAP, should be only one n_links = [l for u, v, l in nffg.real_out_edges_iter(sap_id)] if len(n_links) < 1: log.warning( "SAP is not connected to any node! Maybe you forgot to call " "duplicate_static_links?") return elif 1 < len(n_links): log.warning( "Inter-domain SAP should have one and only one connection to the " "domain! 
Using only the first connection.") continue # Get port and Infra id's in nffg NFFG p_id = n_links[0].dst.id n_id = n_links[0].dst.node.id # Get the inter-domain port from already copied Infra domain_port_nffg = base.network.node[n_id].ports[p_id] sap_port_nffg = n_links[0].src log.debug("Found inter-domain port: %s" % domain_port_nffg) # Copy inter-domain port properties/values for redundant storing if len(domain_port_nffg.properties) > 0: domain_port_dov.properties.update(domain_port_nffg.properties) log.debug("Copy inter-domain port properties: %s" % domain_port_dov.properties) elif len(domain_port_dov.properties) > 0: domain_port_nffg.properties.update(domain_port_dov.properties) log.debug("Copy inter-domain port properties: %s" % domain_port_nffg.properties) # Ensure to add sap tag to inter domain ports if 'sap' not in domain_port_dov.properties: domain_port_dov.add_property("sap", sap_id) if 'sap' not in domain_port_nffg.properties: domain_port_nffg.add_property("sap", sap_id) # Signal Inter-domain port type domain_port_dov.add_property("type", "inter-domain") domain_port_nffg.add_property("type", "inter-domain") # Copy SAP port values into the infra ports domain_port_dov.name = sap_port_dov.name domain_port_dov.sap = sap_port_dov.sap domain_port_dov.capability = sap_port_dov.capability domain_port_dov.technology = sap_port_dov.technology domain_port_dov.delay = sap_port_dov.delay domain_port_dov.bandwidth = sap_port_dov.bandwidth domain_port_dov.cost = sap_port_dov.cost domain_port_dov.controller = sap_port_dov.controller domain_port_dov.orchestrator = sap_port_dov.orchestrator domain_port_dov.l2 = sap_port_dov.l2 domain_port_dov.l4 = sap_port_dov.l4 for l3 in sap_port_dov.l3: domain_port_dov.l3.append(l3.copy()) domain_port_dov.metadata.update(sap_port_dov.metadata) domain_port_nffg.name = sap_port_nffg.name domain_port_nffg.sap = sap_port_nffg.sap domain_port_nffg.capability = sap_port_nffg.capability domain_port_nffg.technology = sap_port_nffg.technology domain_port_nffg.delay = sap_port_nffg.delay domain_port_nffg.bandwidth = sap_port_nffg.bandwidth domain_port_nffg.cost = sap_port_nffg.cost domain_port_nffg.controller = sap_port_nffg.controller domain_port_nffg.orchestrator = sap_port_nffg.orchestrator domain_port_nffg.l2 = sap_port_nffg.l2 domain_port_nffg.l4 = sap_port_nffg.l4 for l3 in sap_port_nffg.l3: domain_port_nffg.l3.append(l3.copy()) domain_port_nffg.metadata.update(sap_port_nffg.metadata) # Delete both inter-domain SAP and links connected to them base.del_node(sap_id) nffg.del_node(sap_id) # Add the inter-domain links for both ways l1, l2 = base.add_undirected_link( p1p2id="inter-domain-link-%s" % sap_id, p2p1id="inter-domain-link-%s-back" % sap_id, port1=domain_port_dov, port2=domain_port_nffg) # Set delay/bandwidth values for outgoing link port1 -> port2 l1.delay = domain_port_dov.delay l1.bandwidth = domain_port_dov.bandwidth # Set delay/bandwidth values for outgoing link port2 -> port2 l2.delay = domain_port_nffg.delay l2.bandwidth = domain_port_nffg.bandwidth else: # Normal SAP --> copy SAP c_sap = base.add_sap(sap_obj=deepcopy(nffg.network.node[sap_id])) log.debug("Copy SAP: %s" % c_sap) # Copy remaining links which should be valid for u, v, link in nffg.network.edges_iter(data=True): src_port = base.network.node[u].ports[link.src.id] dst_port = base.network.node[v].ports[link.dst.id] tmp_src, tmp_dst = link.src, link.dst link.src = link.dst = None c_link = deepcopy(link) c_link.src = src_port c_link.dst = dst_port link.src, link.dst = tmp_src, tmp_dst 
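# Note: the src/dst port references were detached before the deepcopy above to avoid deep-copying the attached port/node objects; the copied link is wired to the corresponding ports of the base NFFG here.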
base.add_link(src_port=src_port, dst_port=dst_port, link=c_link) log.debug("Copy Link: %s" % c_link) log.debug("Domain merging has been finished!") # Return the updated NFFG return base @staticmethod def strip_domain (nffg, domain, log=logging.getLogger("STRIP")): """ Trim the given :class:`NFFG` and leave only the nodes belonging to the given ``domain``. .. warning:: No inter-domain SAP recreation will be performed after the trim! :param nffg: mapped NFFG object :type nffg: :class:`NFFG` :param domain: extracted domain name :type domain: str :param log: additional logger :type log: :any:`logging.Logger` :return: stripped NFFG :rtype: :class:`NFFG` """ log.info("Strip domain in %s" % nffg) nffg = nffg.copy() # Collect every node which is not in the domain deletable = set() for infra in nffg.infras: # Domain representations are based on infras if infra.domain == domain: # Skip the current domain's infra continue # Mark the infra as deletable deletable.add(infra.id) # Look for orphan NF and SAP nodes which are connected to this deletable infra for node_id in nffg.real_neighbors_iter(infra.id): if nffg[node_id].type in (NFFG.TYPE_SAP, NFFG.TYPE_NF): deletable.add(node_id) log.debug("Nodes marked for deletion: %s" % deletable) nffg.network.remove_nodes_from(deletable) log.debug("Remained nodes: %s" % [n for n in nffg]) return nffg @classmethod def extract_domain (cls, nffg, domain, log=logging.getLogger("EXTRACT")): """ Extract domain view from given :class:`NFFG`. :param nffg: mapped NFFG object :type nffg: :class:`NFFG` :param domain: extracted domain name :type domain: str :param log: additional logger :type log: :any:`logging.Logger` :return: extracted domain NFFG :rtype: :class:`NFFG` """ return cls.recreate_inter_domain_SAPs(nffg=cls.strip_domain(nffg=nffg, domain=domain, log=log)) @classmethod def split_into_domains (cls, nffg, log=logging.getLogger("SPLIT")): """ Split given :class:`NFFG` into separate parts based on the original domains.
:param nffg: mapped NFFG object :type nffg: :class:`NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: sliced parts as a list of (domain_name, nffg_part) tuples :rtype: list """ splitted_parts = [] log.info("Splitting NFFG: %s according to detected domains" % nffg) # Define DOMAIN names domains = cls.detect_domains(nffg=nffg) log.debug("Detected domains for splitting: %s" % domains) if len(domains) == 0: log.warning("No domain has been detected!") return splitted_parts NFFGToolBox.reset_inter_domain_property(nffg=nffg, log=log) # Check every domain for domain in domains: log.info("Create slice for domain: %s" % domain) # Collect every node which is not in the domain deletable = set() for infra in nffg.infras: # Domain representations are based on infras if infra.domain == domain: # Skip the current domain's infra continue # Mark the infra as deletable deletable.add(infra.id) # Look for orphan NF and SAP nodes which are connected to this deletable # infra for node_id in nffg.real_neighbors_iter(infra.id): if nffg[node_id].type in (NFFG.TYPE_SAP, NFFG.TYPE_NF): deletable.add(node_id) log.debug("Nodes marked for deletion: %s" % deletable) log.debug("Clone NFFG...") # Copy the NFFG nffg_part = nffg.copy() # Set metadata nffg_part.name = domain # Delete needless nodes --> and as a side effect the connected links too log.debug("Delete marked nodes...") nffg_part.network.remove_nodes_from(deletable) if len(nffg_part): log.debug("Remained nodes: %s" % [n for n in nffg_part]) else: log.debug("No node remained after splitting!") splitted_parts.append((domain, nffg_part)) log.debug( "Search for inter-domain SAP ports and recreate associated SAPs...") # Recreate inter-domain SAP cls.recreate_inter_domain_SAPs(nffg=nffg_part, log=log) # Check orphaned or not connected nodes and remove them log.debug("Trim orphaned nodes from splitted part...") cls.trim_orphaned_nodes(nffg=nffg_part, domain=domain, log=log) log.debug("Merge external ports into their original SAP ports...") cls.merge_external_ports(nffg=nffg_part, log=log) log.info("Splitting has been finished!") return splitted_parts @classmethod def split_nfs_by_domain (cls, nffg, nfs=None, log=logging.getLogger('SPLIT')): """ Split the given NF IDs based on domains defined in given NFFG. :param nffg: base NFFG :type nffg: :class:`NFFG` :param nfs: collection of NF Ids :type nfs: list or set :param log: additional logger :type log: :any:`logging.Logger` :return: NF IDs grouped by domain :rtype: dict """ if nfs is None: nfs = [nf.id for nf in nffg.nfs] log.debug("Splitting nfs: %s by domains..." % nfs) domains = {} for nf in nfs: domain = nffg.get_domain_of_nf(nf_id=nf) if not domain: log.warning("Missing domain of nf: %s" % nf) continue if domain in domains: domains[domain].append(nf) else: domains[domain] = [nf] return domains @classmethod def recreate_missing_match_TAGs (cls, nffg, log=logging.getLogger("TAG")): """ Recreate TAGs for flowrules forwarding traffic from a different domain. In case a hop of the service request is mapped as a collocated link, it might break down to multiple links/flowrules in a lower layer where the links are placed into different domains. The match/action fields are then created without TAGs, because collocated links do not use tags by default.
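This function appends an abstract ``TAG=<None>|<None>|<SGHop id>`` entry to the match field of every flowrule whose in_port is an inter-domain port and which does not already carry the corresponding TAG.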
:param nffg: mapped NFFG object :type nffg: :any:`NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: None """ log.debug("Recreate missing TAG matching fields...") for infra in nffg.infras: # Iterate over flowrules of the infra for flowrule in infra.flowrules(): # Get the source in_port of the flowrule from match field splitted = flowrule.match.split(';', 1) in_port = splitted[0].split('=')[1] try: # Convert in_port to int if it is possible in_port = int(in_port) except ValueError: pass # If the port is an inter-domain port if infra.ports[in_port].get_property('type') == "inter-domain": log.debug("Found inter-domain port: %s", infra.ports[in_port]) if len(splitted) > 1: # There is one or more TAG in match tags = splitted[1].split(';') found = False for tag in tags: try: vlan = tag.split('|')[-1] except ValueError: continue # Found a TAG with the vlan if vlan == str(flowrule.id): found = True break if found: # If found the appropriate TAG -> skip adding continue log.debug("TAG with vlan: %s is not found in %s!" % (flowrule.id, flowrule)) match_vlan = ";TAG=<None>|<None>|%s" % flowrule.id flowrule.match += match_vlan log.debug("Manually extended match field: %s" % flowrule.match) @classmethod def rewrite_interdomain_tags (cls, slices, flowrule_stitching=None, log=logging.getLogger("adaptation.TAG")): """ Calculate and rewrite inter-domain tags. Inter-domain connections via inter-domain SAPs are harmonized here. The abstract tags in flowrules are rewritten to technology specific ones based on the information retrieved from inter-domain SAPs. :param slices: list of mapped :class:`NFFG` instances :type slices: list :param log: additional logger :type log: :any:`logging.Logger` :return: list of NFFG structures with updated tags """ log.debug("Calculating inter-domain tags...") for nffg in slices: log.debug("Processing domain %s" % nffg[0]) # collect SAP ports of infra nodes sap_ports = [] for sap in nffg[1].saps: sap_switch_links = [(u, v, link) for u, v, link in nffg[1].network.edges_iter(data=True) if sap.id in (u, v) and link.type == NFFG.TYPE_LINK_STATIC] # sap_switch_links = [e for e in # nffg[1].network.edges_iter(data=True) if # sap.id in e] # list of e = (u, v, data) try: if sap_switch_links[0][0] == sap.id: sap_ports.append(sap_switch_links[0][2].dst) else: sap_ports.append(sap_switch_links[0][2].src) except IndexError: log.error( "Link for SAP: %s is not found." 
% sap) continue log.debug("SAP_PORTS: %s" % sap_ports) for infra in nffg[1].infras: # log.debug("Processing infra %s" % infra) for flowrule in infra.flowrules(): for sap_port in sap_ports: # process inbound flowrules of SAP ports if re.search('in_port=', flowrule.match): in_port = re.sub(r'.*in_port=([^;]*).*', r'\1', flowrule.match) if str(in_port) == str(sap_port.id): # found inbound rule log.debug("Found inbound flowrule (%s):\n %s" % (flowrule.id, flowrule)) if sap_port.sap is not None: log.debug("Found inter-domain SAP port: %s, %s" % (sap_port, sap_port.sap)) # rewrite TAG in match field if not re.search(r'TAG', flowrule.match): match_tag = ";TAG=<None>|<None>|%s" % flowrule.id flowrule.match += match_tag log.info("TAG conversion: extend match field in a " "flowrule of infra %s" % infra.id) log.info("updated flowrule (%s):\n %s" % (flowrule.id, flowrule)) else: log.debug("Found user SAP port: %s" % sap_port) # remove TAG from match field if re.search(r'TAG', flowrule.match): flowrule.match = re.sub(r'(;TAG=[^;]*)', r'', flowrule.match) log.info("TAG conversion: remove TAG match in a " "flowrule of infra %s" % infra.id) log.info("updated flowrule (%s):\n %s" % (flowrule.id, flowrule)) # process outbound flowrules of SAP ports if re.search('output=', flowrule.action): output = re.sub(r'.*output=([^;]*).*', r'\1', flowrule.action) if str(output) == str(sap_port.id): # found outbound rule log.debug("Found outbound rule (%s):\n %s" % (flowrule.id, flowrule)) if sap_port.sap is not None: log.debug("Found inter-domain SAP port: %s, %s" % (sap_port, sap_port.sap)) # rewrite TAG in action field if not re.search(r'TAG', flowrule.action): push_tag = ";TAG=<None>|<None>|%s" % flowrule.id flowrule.action += push_tag log.info("TAG conversion: extend action field in a " "flowrule of infra %s" % infra.id) log.info("updated flowrule (%s):\n %s" % (flowrule.id, flowrule)) else: log.debug("Found user SAP port: %s" % sap_port) # remove TAG from action field if re.search(r';TAG', flowrule.action): flowrule.action = re.sub(r'(;TAG=[^;]*)', r'', flowrule.action) log.info("TAG conversion: remove TAG action in a " "flowrule of infra %s" % infra.id) # add UNTAG to action field if not re.search(r'UNTAG', flowrule.action): flowrule.action += ';UNTAG' log.info("TAG conversion: add UNTAG action in a " "flowrule of infra %s" % infra.id) log.info("updated flowrule (%s):\n %s" % (flowrule.id, flowrule)) return slices @staticmethod def rebind_e2e_req_links (nffg, log=logging.getLogger("REBIND")): """ Search for splitted requirement links in the NFFG. If a link connects inter-domain SAPs rebind the link as an e2e requirement link. :param nffg: splitted NFFG object :type nffg: :class:`NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: rebounded NFFG :rtype: :class:`NFFG` """ log.debug( "Search for requirement link fragments to rebind as e2e requirement...") req_cache = [] def __detect_connected_sap (port): """ Detect if the given port is connected to a SAP. :param port: port object :type port: :any:`Port` :return: SAP port or None :rtype: :any:`Port` """ connected_port = [l.dst for u, v, l in nffg.real_out_edges_iter(port.node.id) if str(l.src.id) == str(port.id)] # If the number of detected nodes is unexpected continue to the next req if len(connected_port) < 1: log.warning("Skip edge rebinding: No connected node is detected for " "SAP port: %s" % port) return None elif len(connected_port) > 1: log.warning("Skip edge rebinding: Multiple connected nodes are " "detected for SAP port: %s: %s!" 
% (port, connected_port)) return None elif connected_port[0].node.type == NFFG.TYPE_SAP: return connected_port[0] else: return None for req in nffg.reqs: if req.src.node.type == NFFG.TYPE_SAP and \ req.dst.node.type == NFFG.TYPE_SAP: log.debug("Skip rebinding: Detected %s is already an end-to-end link!" % req) return nffg # Detect the node connected to the src port of req link src_sap_port = __detect_connected_sap(port=req.src) if src_sap_port: log.debug("Detected src SAP node: %s" % src_sap_port) else: continue # Detect the node connected to the dst port of req link dst_sap_port = __detect_connected_sap(port=req.dst) if dst_sap_port: log.debug("Detected dst SAP node: %s" % dst_sap_port) else: continue # Create e2e req link and store for rebinding e2e_req = req.copy() e2e_req.src = src_sap_port e2e_req.dst = dst_sap_port req_cache.append((req.src.node.id, req.dst.node.id, req.id, e2e_req)) # Rebind marked Requirement links if not req_cache: log.debug("No requirement link has been rebounded!") else: for src, dst, id, e2e in req_cache: nffg.del_edge(src=src, dst=dst, id=id) nffg.add_edge(src=e2e.src, dst=e2e.dst, link=e2e) log.debug("Rebounded requirement link: %s" % e2e) # Return the rebounded NFFG return nffg ############################################################################## # ----------------------- Single BiSBiS view generation ---------------------- ############################################################################## @staticmethod def generate_SBB_representation (nffg, sbb_id=DEFAULT_SBB_ID, add_sg_hops=False, log=logging.getLogger("SBB")): """ Generate the trivial virtual topology a.k.a one BisBis or Single BisBis representation with calculated resources and transferred NF and SAP nodes. :param nffg: global resource :type nffg: :class:`NFFG` :param add_sg_hops: recreate SG hop links also (default: False) :type add_sg_hops: bool :param log: additional logger :type log: :any:`logging.Logger` :return: single Bisbis representation :rtype: :class:`NFFG` """ if nffg is None: log.error("Missing global resource info! 
Skip OneBisBis generation!") return None # Create Single BiSBiS NFFG log.debug("Generate trivial SingleBiSBiS NFFG based on %s:" % nffg) log.debug("START SBB generation...") sbb = NFFG(id=sbb_id, name="Single-BiSBiS-View") # Create the single BiSBiS infra sbb_infra = sbb.add_infra(id="SingleBiSBiS", name="SingleBiSBiS", domain=NFFG.DEFAULT_DOMAIN, infra_type=NFFG.TYPE_INFRA_BISBIS) # Compute and add resources # Sum of available CPU try: sbb_infra.resources.cpu = sum( # If iterator is empty, sum got None --> TypeError thrown by sum (n.resources.cpu for n in nffg.infras if n.resources.cpu is not None) or None) except TypeError: sbb_infra.resources.cpu = None # Sum of available memory try: sbb_infra.resources.mem = sum( # If iterator is empty, sum got None --> TypeError thrown by sum (n.resources.mem for n in nffg.infras if n.resources.mem is not None) or None) except TypeError: sbb_infra.resources.mem = None # Sum of available storage try: sbb_infra.resources.storage = sum( # If iterator is empty, sum got None --> TypeError thrown by sum (n.resources.storage for n in nffg.infras if n.resources.storage is not None) or None) except TypeError: sbb_infra.resources.storage = None # Minimal available delay value of infras and links in DoV try: # Get the minimum delay in Dov to avoid false negative mapping result sbb_infra.resources.delay = min(itertools.chain( # If the chained iterators is empty --> ValueError thrown by sum (n.resources.delay for n in nffg.infras if n.resources.delay is not None), (l.delay for l in nffg.links if l.delay is not None))) except ValueError: sbb_infra.resources.delay = None # Maximum available bandwidth value of infras and links in DoV try: max_bw = max(itertools.chain( (n.resources.bandwidth for n in nffg.infras if n.resources.bandwidth is not None), (l.bandwidth for l in nffg.links if l.bandwidth is not None))) # Number of infras and links in DoV sum_infra_link = sum(1 for _ in itertools.chain(nffg.infras, nffg.links)) # Overestimate switching capacity to avoid false positive mapping result sbb_infra.resources.bandwidth = max_bw * sum_infra_link except ValueError: sbb_infra.resources.bandwidth = None log.debug("Computed SingleBiBBiS resources: %s" % sbb_infra.resources) # Add supported types s_types = set() for infra in nffg.infras: s_types = s_types.union(infra.supported) sbb_infra.add_supported_type(s_types) log.debug("Added supported types: %s" % s_types) log.debug("Added Infra BiSBiS: %s" % sbb_infra) log.log(VERBOSE, "SBB:\n%s" % sbb_infra.dump()) # Add existing NFs for nf in nffg.nfs: c_nf = sbb.add_nf(nf=nf.copy()) log.debug("Added NF: %s" % c_nf) log.log(VERBOSE, "NF:\n%s" % nf.dump()) # Discover and add NF connections for u, v, l in nffg.real_out_edges_iter(nf.id): if l.type != NFFG.TYPE_LINK_DYNAMIC: continue # Explicitly add links for both direction link1, link2 = sbb.add_undirected_link(port1=c_nf.ports[l.src.id], port2=sbb_infra.add_port( id=l.dst.id), p1p2id=l.id, p2p1id="%s-back" % l.id, dynamic=True, delay=l.delay, bandwidth=l.bandwidth) log.debug("Added connection: %s" % link1) log.debug("Added connection: %s" % link2) # Use SAP id --> SBB port id cache for delay matrix calculation delay_matrix_cache = {} # Add existing SAPs and their connections to the SingleBiSBiS infra for sap in nffg.saps: for p in sap.ports: if str(p.id).startswith("EXTERNAL"): log.debug("Detected EXTERNAL port: %s in SAP: %s! Skip adding..." 
% (p.id, sap.id)) continue c_sap = sbb.add_sap(sap_obj=sap.copy()) log.debug("Added SAP: %s" % c_sap) log.log(VERBOSE, "SAP:\n%s" % c_sap.dump()) # Discover and add SAP connections for u, v, l in nffg.real_out_edges_iter(sap.id): if len(sap.ports) > 1: log.warning("SAP contains multiple port!") sbb_infra_port = sbb_infra.add_port(id=str(c_sap.id), sap=sap.ports.container[0].sap) # Explicitly add links for both direction link1, link2 = sbb.add_undirected_link(port1=c_sap.ports[l.src.id], port2=sbb_infra_port, p1p2id=l.id, p2p1id="%s-back" % l.id, delay=l.delay, bandwidth=l.bandwidth) log.debug("Added connection: %s" % link1) log.debug("Added connection: %s" % link2) delay_matrix_cache[c_sap.id] = sbb_infra_port.id # Shortest paths in format of dict in dict keyed with node ids # e.g. SAP2 --> EE1 --> 4.9 latency_paths = NFFGToolBox.shortestPathsInLatency(G=nffg.network) log.log(VERBOSE, "Calculated latency paths for delay matrix:\n%s" % pprint.pformat(latency_paths)) log.log(VERBOSE, "Collected SAP ports for delay matrix:\n%s" % pprint.pformat(delay_matrix_cache)) dm_elements = itertools.permutations(delay_matrix_cache.keys(), 2) for src, dst in dm_elements: if src not in latency_paths: log.warning("Missing node: %s for latency paths: %s!" % (src, (src, dst))) continue if dst not in latency_paths[src]: log.warning("Missing node: %s for latency paths: %s!" % (src, (src, dst))) else: sbb_infra.delay_matrix.add_delay(src=src, dst=dst, delay=latency_paths[src][dst]) log.debug("Added delay matrix element [%s --> %s]: %s" % (src, dst, latency_paths[src][dst])) # Recreate flowrules based on NBalazs functions sg_hop_info = NFFGToolBox.get_all_sghop_info(nffg=nffg) log.log(VERBOSE, "Detected SG hop info:\n%s" % pprint.pformat(sg_hop_info)) log.debug("Recreate flowrules...") for sg_id, value in sg_hop_info.iteritems(): sg_src_node = value[0].node.id sg_src_port = value[0].id sg_dst_node = value[1].node.id sg_dst_port = value[1].id flowclass = value[2] fr_bw = value[3] fr_delay = value[4] fr_const = deepcopy(value[5]) fr_extra = value[6] fr_hop = sg_id sbb_src_port = [l.dst for u, v, l in sbb.network.out_edges_iter(sg_src_node, data=True) if l.src.id == sg_src_port and l.src.node.id == sg_src_node] if len(sbb_src_port) < 1: log.warning("No opposite Port(node: %s, id: %s) was found for SG hop: " "%s in new SingleBiSBiS node" % ( sg_src_node, sg_src_port, fr_hop)) continue if len(sbb_src_port) > 1: log.warning("Too much Port(node: %s, id: %s) was found for SG hop: " "%s in new SingleBiSBiS node: %s" % ( sg_src_node, sg_src_port, fr_hop, sbb_src_port)) continue sbb_src_port = sbb_src_port.pop() sbb_dst_port = [l.dst for u, v, l in sbb.network.out_edges_iter(sg_dst_node, data=True) if l.src.id == sg_dst_port and l.src.node.id == sg_dst_node] if len(sbb_dst_port) < 1: log.warning("No opposite Port(node: %s, id: %s) was found for SG hop: " "%s in new SingleBiSBiS node" % ( sg_dst_node, sg_dst_port, fr_hop)) continue if len(sbb_dst_port) > 1: log.warning("Too much Port(node: %s, id: %s) was found for SG hop: " "%s in new SingleBiSBiS node: %s" % ( sg_dst_node, sg_dst_port, fr_hop, sbb_dst_port)) continue sbb_dst_port = sbb_dst_port.pop() if flowclass: fr_match = "in_port=%s;flowclass=%s" % (sbb_src_port.id, flowclass) else: fr_match = "in_port=%s" % sbb_src_port.id fr_action = "output=%s" % sbb_dst_port.id if fr_extra is not None: fr_action += ";%s" % fr_extra if value[0].node.type == NFFG.TYPE_SAP and \ value[1].node.type == NFFG.TYPE_NF and \ value[0].sap is not None: # Update action for flowrule connecting 
inter-domain SAP to NF fr_action += ";UNTAG" fr = sbb_src_port.add_flowrule(id=fr_hop, match=fr_match, action=fr_action, bandwidth=fr_bw, delay=fr_delay, constraints=fr_const) log.debug("Added flowrule: %s" % fr) if add_sg_hops: log.debug("Recreate SG hops...") for sg_id, value in sg_hop_info.iteritems(): sg_src_port = value[0] sg_dst_port = value[1] hop_fc = value[2] hop_bw = value[3] hop_delay = value[4] hop_const = deepcopy(value[5]) sg = sbb.add_sglink(id=sg_id, src_port=sg_src_port, dst_port=sg_dst_port, flowclass=hop_fc, delay=hop_delay, bandwidth=hop_bw, constraints=hop_const) log.debug("Added SG hop: %s" % sg) else: log.debug("Skip SG hop recreation for the SingleBiSBiS!") NFFGToolBox.rewrite_interdomain_tags([(sbb.id, sbb)]) log.debug("END SBB generation...") # Return with Single BiSBiS infra return sbb ############################################################################## # ----------------------- Domain update functions ----------------------- ############################################################################## @classmethod def clear_domain (cls, base, domain, log=logging.getLogger("CLEAN")): """ Clean domain by removing initiated NFs and flowrules related to BiSBiS nodes of the given domain :param base: base NFFG object :type base: :class:`NFFG` :param domain: domain name :type domain: str :param log: additional logger :type log: :any:`logging.Logger` :return: the update base NFFG :rtype: :class:`NFFG` """ base_domain = cls.detect_domains(nffg=base) if domain not in base_domain: log.warning("No node was found in %s with domain: %s for cleanup! " "Leave NFFG unchanged..." % (base, domain)) return base for infra in base.infras: deletable_ports = set() deletable_nfs = set() # Skip nodes from other domains if infra.domain != domain: continue # Iterate over out edges from the current BB node for infra_id, node_id, link in base.real_out_edges_iter(infra.id): # Mark connected NF for deletion if base[node_id].type in (NFFG.TYPE_NF,): deletable_nfs.add(node_id) # Mark related dynamic port for deletion deletable_ports.add(link.src) if deletable_nfs: log.debug("Initiated NFs marked for deletion: %s on node: %s" % (deletable_nfs, infra.id)) # Remove NFs base.network.remove_nodes_from(deletable_nfs) if deletable_ports: log.debug("Dynamic ports marked for deletion: %s on node: %s" % (deletable_ports, infra.id)) # Remove dynamic ports for p in deletable_ports: base[infra.id].ports.remove(p) # Delete flowrules from ports for port in base[infra.id].ports: port.clear_flowrules() return base @classmethod def remove_domain (cls, base, domain, log=logging.getLogger("REMOVE")): """ Remove elements from the given ``base`` :class:`NFFG` with given ``domain`` name. :param base: base NFFG object :type base: :class:`NFFG` :param domain: domain name :type domain: str :param log: additional logger :type log: :any:`logging.Logger` :return: the update base NFFG :rtype: :class:`NFFG` """ log.debug("Remove nodes and edges which part of the domain: %s from %s..." % (domain, base)) # Check existing domains base_domain = cls.detect_domains(nffg=base) if domain not in base_domain: log.warning("No node was found in %s with domain: %s for removing! " "Leave NFFG unchanged..." 
% (base, domain)) return base deletable = set() for infra in base.infras: # Add deletable infras if infra.domain != domain: continue deletable.add(infra.id) # Add deletable SAP/NF connected to iterated infra for node_id in base.real_neighbors_iter(infra.id): if base[node_id].type in (NFFG.TYPE_SAP, NFFG.TYPE_NF): deletable.add(node_id) log.debug("Nodes marked for deletion: %s" % deletable) base.network.remove_nodes_from(deletable) if len(base): log.debug("Remained nodes after deletion: %s" % [n for n in base]) else: log.debug("No node was remained after splitting! ") log.debug("Search for inter-domain SAP ports and " "recreate associated SAPs...") cls.recreate_inter_domain_SAPs(nffg=base, log=log) # Check orphaned or not connected nodes and remove them log.debug("Trim orphaned nodes from updated NFFG...") cls.trim_orphaned_nodes(nffg=base, log=log) return base @classmethod def update_domain (cls, base, updated, log): """ Update the given ``updated`` nffg into the ``base`` NFFG. :param base: base NFFG object :type base: :class:`NFFG` :param updated: updated domain information :type updated: :class:`NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: the update base NFFG :rtype: :class:`NFFG` """ # Get new domain name domain = cls.detect_domains(nffg=updated) if len(domain) == 0: log.error("No domain detected in new %s!" % updated) return if len(domain) > 1: log.warning("Multiple domain name detected in new %s!" % updated) return domain = domain.pop() log.debug("Update elements of domain: %s in %s..." % (domain, base.id)) base_infras = {i.id for i in base.infras if i.domain == domain} if len(base_infras) == 0: log.warning("No Node was found in the base %s! Use merging..." % base) return cls.merge_new_domain(base=base, nffg=updated, log=log) # If infra nodes were removed or added, best way is to remerge domain else: # TODO - implement real update log.error("Domain update has not implemented yet!") ############################################################################## # ------------------- Status info-based update functions --------------------- ############################################################################## @classmethod def update_status_info (cls, nffg, status, log=logging.getLogger("UPDATE-STATUS")): """ Update the mapped elements of given nffg with given status. :param nffg: base NFFG object :type nffg: :class:`NFFG` :param status: new status :type status: str :param log: additional logger :type log: :any:`logging.Logger` :return: the update base NFFG :rtype: :class:`NFFG` """ log.debug("Add %s status for NFs and Flowrules..." % status) for nf in nffg.nfs: nf.status = status for infra in nffg.infras: for flowrule in infra.flowrules(): flowrule.status = status return nffg @classmethod def update_nffg_by_status (cls, base, updated, log=logging.getLogger("UPDATE-DOMAIN-STATUS")): """ Update status of the elements of the given ``base`` nffg based on the given ``updated`` nffg. 
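NF statuses are updated by matching node IDs, while flowrule statuses are updated per Infra port by matching flowrule IDs.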
:param base: base NFFG object :type base: :class:`NFFG` :param updated: updated domain information :type updated: :class:`NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: the update base NFFG :rtype: :class:`NFFG` """ # Update NF status base_nfs = {nf.id for nf in base.nfs} updated_nfs = {nf.id for nf in updated.nfs} log.debug("Update status of NF nodes: %s" % updated_nfs) for nf in base_nfs: if nf in updated_nfs: base[nf].status = updated[nf].status else: log.warning("Missing NF: %s from base NFFG: %s" % (nf, base)) # Update Flowrule status base_infras = {infra.id for infra in base.infras} updated_infras = {infra.id for infra in updated.infras} log.debug("Update status of flowrules in Infra nodes: %s" % updated_infras) for infra_id in base_infras: # Skip Infras from other domains if infra_id not in updated_infras: continue for port in base[infra_id].ports: if port.id not in updated[infra_id].ports: log.warning("Port: %s in Infra: %s is not in the updated NFFG! " "Skip flowrule status update in this Port..." % (port.id, infra_id)) continue for fr in base[infra_id].ports[port.id].flowrules: changed = False for ufr in updated[infra_id].ports[port.id].flowrules: # Theoretically in a port there is only one flowrule with a given # hop_id --> if the hop_ids are the same it must be the same fr if fr.id == ufr.id: fr.status = ufr.status changed = True break if not changed: log.warning("Flowrule: %s is not in the updated NFFG! " "Skip flowrule status update..." % fr) return base @classmethod def update_status_by_dov (cls, nffg, dov, init_status=NFFG.STATUS_PENDING, log=logging.getLogger("UPDATE-DOV-STATUS")): """ Update status of the elements of the given ``base`` nffg based on the given ``updated`` nffg. :param nffg: base NFFG object :type nffg: :class:`NFFG` :param dov: updated domain information :type dov: :class:`NFFG` :type init_status: init status of new element :param log: additional logger :type log: :any:`logging.Logger` :return: the update base NFFG :rtype: :class:`NFFG` """ # Update NF status nffg_nfs = {nf.id for nf in nffg.nfs} dov_nfs = {nf.id for nf in dov.nfs} log.debug("Update status of existing NF nodes: %s" % nffg_nfs) for nf in nffg_nfs: if nf in dov_nfs: nffg[nf].status = dov[nf].status else: nffg[nf].status = init_status # Update Flowrule status for infra in nffg.infras: for flowrule in infra.flowrules(): flowrule.status = init_status nffg_infras = {infra.id for infra in nffg.infras} dov_infras = {infra.id for infra in dov.infras} log.debug("Update status of existing flowrules in Infra nodes: %s" % nffg_infras) for infra_id in nffg_infras: if infra_id not in dov_infras: continue for port in nffg[infra_id].ports: if port.id not in dov[infra_id].ports: continue dov_frs = {f.id for f in dov[infra_id].ports[port.id].flowrules} for fr in nffg[infra_id].ports[port.id].flowrules: if fr.id not in dov_frs: fr.status = init_status for f in dov[infra_id].ports[port.id].flowrules: if f.id == fr.id: fr.status = f.status return nffg def filter_non_running_NFs (self, nffg, log=logging.getLogger("FILTER")): """ Create a new NFFG from the given ``nffg`` and filter out the stopped/failed Nfs. :param nffg: base NFFG object :type nffg: :class:`NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: None """ # TODO implement pass @classmethod def remove_deployed_services (cls, nffg, log=logging.getLogger("CLEAN")): """ Remove all the installed NFs, flowrules and dynamic ports from given NFFG. 
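NFs and their dynamic ports are detected as the endpoints of DYNAMIC links leading to NF nodes; the flowrules of every remaining Infra port are cleared as well.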
:param nffg: base NFFG :type nffg: :class:`NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: the cleaned nffg :rtype: :class:`NFFG` """ for infra in nffg.infras: log.debug("Remove deployed elements from Infra: %s" % infra.id) del_ports = [] del_nfs = [] for src, dst, link in nffg.network.out_edges_iter(data=True): if link.type == NFFG.TYPE_LINK_DYNAMIC and \ link.dst.node.type == NFFG.TYPE_NF: del_nfs.append(dst) del_ports.append(link.src.id) if del_nfs: nffg.network.remove_nodes_from(del_nfs) log.debug("Removed NFs: %s" % del_nfs) if del_ports: for id in del_ports: infra.del_port(id) log.debug("Removed dynamic ports: %s" % del_ports) log.debug("Clear flowrules...") for port in infra.ports: port.clear_flowrules() return nffg ############################################################################## # ----------------------- High level NFFG operations ------------------------ ############################################################################## @classmethod def _copy_node_type (cls, type_iter, target, log): """ Copies all element from iterator if it is not in target, and merges their port lists. :param type_iter: Iterator on objects to be added :type type_iter: :any: iterator on `Node` :param target: The target NFFG :type target: :any: `NFFG` :return: the updated base NFFG :rtype: :class:`NFFG` """ for obj in type_iter: if obj.id not in target: c_obj = target.add_node(deepcopy(obj)) log.debug("Copy NFFG node: %s" % c_obj) else: for p in obj.ports: if p.id not in target.network.node[obj.id].ports: target.network.node[obj.id].add_port(id=p.id, properties=p.properties) # TODO: Flowrules are not copied! log.debug("Copy port %s to NFFG element %s" % (p, obj)) return target @classmethod def _copy_node_type_with_flowrules (cls, type_iter, target, log, copy_shallow=False): """ Copies all element from iterator if it is not in target, and merges their port lists. :param type_iter: Iterator on objects to be added :type type_iter: :any: iterator on `Node` :param target: The target NFFG :type target: :any: `NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: the updated base NFFG :rtype: :class:`NFFG` """ for obj in type_iter: if obj.id not in target: c_obj = target.add_node(obj if copy_shallow else deepcopy(obj)) log.debug("Copy NFFG node: %s" % c_obj) else: for p in obj.ports: if p.id not in target.network.node[obj.id].ports: new_port = target.network.node[obj.id].add_port(id=p.id, properties=p.properties) log.debug("Copy port %s to NFFG element %s" % (p, obj)) if hasattr(p, 'flowrules'): log.debug("Merging flowrules of port %s of node %s" % (p.id, obj.id)) for fr in p.flowrules: if fr.id not in (f.id for f in new_port.flowrules): new_port.flowrules.append(fr if copy_shallow else copy.deepcopy(fr)) else: old_port = target.network.node[obj.id].ports[p.id] for fr in p.flowrules: if fr.id not in (f.id for f in old_port.flowrules): old_port.flowrules.append(fr if copy_shallow else copy.deepcopy(fr)) return target @classmethod def merge_nffgs (cls, target, new, log=logging.getLogger("UNION"), copy_shallow=False): """ Merges new `NFFG` to target `NFFG` keeping all parameters and copying port object from new. Comparison is done based on object id, resources and requirements are kept unchanged in target. :type copy_shallow: If set to True, set only references to the copied objects instead of deep copies. 
:param target: target NFFG object :type target: :class:`NFFG` :param new: NFFG object to merge from :type new: :class:`NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: the updated base NFFG :rtype: :class:`NFFG` """ # Copy Infras target = cls._copy_node_type_with_flowrules(new.infras, target, log, copy_shallow) # Copy NFs target = cls._copy_node_type(new.nfs, target, log) # Copy SAPs target = cls._copy_node_type(new.saps, target, log) # Copy remaining links which should be valid for u, v, link in new.network.edges_iter(data=True): if not target.network.has_edge(u, v, key=link.id): src_port = target.network.node[u].ports[link.src.id] dst_port = target.network.node[v].ports[link.dst.id] tmp_src, tmp_dst = link.src, link.dst link.src = link.dst = None c_link = link if copy_shallow else deepcopy(link) c_link.src = src_port c_link.dst = dst_port link.src, link.dst = tmp_src, tmp_dst target.add_link(src_port=src_port, dst_port=dst_port, link=c_link) log.debug("Copy Link: %s" % c_link) return target @classmethod def subtract_nffg (cls, minuend, subtrahend, consider_vnf_status=False, ignore_infras=False): """ Deletes every (all types of) node from minuend which have higher degree in subtrahend. And removes every (all types of) edge from minuend which are present in subtrahend. Changes minuend, but doesn't change subtrahend. NOTE: a node cannot be decreased to degree 0, because then it will be removed. :param minuend: minuend NFFG object :type minuend: :class:`NFFG` :param subtrahend: NFFG object to be subtracted :type subtrahend: :class:`NFFG` :param consider_vnf_status: consider VNF status :type consider_vnf_status: bool :param ignore_infras: ignore infra nodes :type ignore_infras: bool :return: NFFG which is minuend \ subtrahend :rtype: :class:`NFFG` """ if ignore_infras: minuend_degrees = {} for nf in minuend.nfs: minuend_degrees[nf.id] = len(minuend.adjacent_sghops(nf.id)) subtrahend_degrees = [(nf.id, len(subtrahend.adjacent_sghops(nf.id))) \ for nf in subtrahend.nfs] else: minuend_degrees = minuend.network.degree() subtrahend_degrees = subtrahend.network.degree().iteritems() for n, d in subtrahend_degrees: if n in minuend_degrees: if d >= minuend_degrees[n]: # If their status shall be considered AND the statuses are equal then # they are considered equal and it shouldn't be in the minuend. if not consider_vnf_status or (consider_vnf_status and subtrahend.network.node[ n].status == minuend.network.node[n].status): for edge_func in (minuend.network.in_edges_iter, minuend.network.out_edges_iter): for i, j, data in edge_func([n], data=True): if data.type == 'SG': minuend.del_flowrules_of_SGHop(data.id) minuend.del_node(minuend.network.node[n]) for i, j, k, d in subtrahend.network.edges_iter(keys=True, data=True): if minuend.network.has_edge(i, j, key=k): minuend.del_edge(i, j, k) if d.type == 'SG': minuend.del_flowrules_of_SGHop(d.id) return minuend @classmethod def generate_difference_of_nffgs (cls, old, new, ignore_infras=False): """ Creates two NFFG objects which can be used in NFFG.MODE_ADD and NFFG.MODE_DEL operation modes of the mapping algorithm. Doesn't modify input objects. If infra nodes shall be ignored, node degree comparison is only based on SGHops, but the output structure still contains the infras which were in the input. 
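Roughly speaking, the returned ADD part contains the elements present only in ``new``, while the DEL part contains the elements present only in ``old``.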
:param old: old NFFG object :type old: :class:`NFFG` :param new: NFFG object of the new config :type new: :class:`NFFG` :param ignore_infras: ignore infra nodes :type ignore_infras: bool :return: a tuple of NFFG-s for addition and deletion resp. on old config. :rtype: tuple """ add_nffg = copy.deepcopy(new) add_nffg.mode = NFFG.MODE_ADD del_nffg = copy.deepcopy(old) del_nffg.mode = NFFG.MODE_DEL add_nffg = NFFGToolBox.subtract_nffg(add_nffg, old, consider_vnf_status=True, ignore_infras=ignore_infras) del_nffg = NFFGToolBox.subtract_nffg(del_nffg, new, ignore_infras=ignore_infras) # WARNING: we always remove the EdgeReqs from the delete NFFG, this doesn't # have a defined meaning so far. for req in [r for r in del_nffg.reqs]: del_nffg.del_edge(req.src, req.dst, req.id) # NOTE: It should be possible to delete an NF, which is not connected # anywhere. With setting and using the operation field of NFs, NFs with # no connected SGhops are possible. # for n, d in [t for t in del_nffg.network.nodes(data=True)]: # if del_nffg.network.out_degree(n) + del_nffg.network.in_degree(n) == 0: # del_nffg.del_node(d) # NOTE: set operation delete to filter removing NFs which wouldn't have # left any more connected SGHops. for del_nf in del_nffg.nfs: if del_nf.id in old.network.nodes_iter() and \ del_nf.id not in new.network.nodes_iter(): del_nf.operation = NFFG.OP_DELETE # The output ADD NFFG shall still include the Infras even if they were # ignored during the difference calculation. # Copy data from new NFFG to old NFFG add_nffg.id = del_nffg.id = new.id add_nffg.name = del_nffg.name = new.name add_nffg.metadata = new.metadata.copy() del_nffg.metadata = new.metadata.copy() return add_nffg, del_nffg ############################################################################## # --------------------- Mapping-related NFFG operations ---------------------- ############################################################################## @staticmethod def _find_infra_link (nffg, port, outbound=True, accept_dyn=False): """ Returns the object of a static link which is connected to 'port'. If None is returned, we can suppose that the port is dynamic. :param nffg: NFFG object which contains port. :type nffg: :class:`NFFG` :param port: The port which should be the source or destination. :type port: :any:`Port` :param outbound: Determines whether outbound or inbound link should be found :type outbound: bool :param accept_dyn: accepts DYNAMIC links too :type outbound: bool :return: found static link or None :rtype: :any:`Link` """ link = None if outbound: edges_func = nffg.network.out_edges_iter else: edges_func = nffg.network.in_edges_iter for i, j, d in edges_func([port.node.id], data=True): if d.type == 'STATIC' or (accept_dyn and d.type == 'DYNAMIC'): if outbound and port.id == d.src.id: if link is not None: raise RuntimeError("InfraPort %s has more than one outbound " "links!" % port.id) link = d if not outbound and port.id == d.dst.id: if link is not None: raise RuntimeError("InfraPort %s has more than one inbound " "links!" % port.id) link = d if link is None: raise RuntimeError(" ".join(("Dynamic" if accept_dyn else "Static", "outbound" if outbound else "inbound", "link couldnt be found connected to port", str(port)))) return link @staticmethod def try_to_convert (id): """ Tries to convert a string type ID to integer (base 10). 
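For example, ``"42"`` is converted to ``42``, while a non-numeric ID such as ``"SAP1"`` is returned unchanged.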
:param id: ID to be converted :type id: str :return: integer ID if it can be converted, string otherwise :rtype: int """ converted = id try: converted = int(id) except ValueError: pass return converted @staticmethod def _extract_flowclass (splitted_matches): """ Interprets the match field of a flowrule: everything is considered flowclass except the "TAG=" and "in_port=" fields. Returns the string to be put into the flowclass field. Hopefully the order of the match segments is kept or irrelevant. :param splitted_matches: elements of the match field :type splitted_matches: list :return: flowclass value :rtype: str """ flowclass = "" for match in splitted_matches: field, mparam = match.split("=", 1) if field == "flowclass": flowclass += mparam elif field != "TAG" and field != "in_port": flowclass += "".join((field, "=", mparam)) if flowclass == "": return None else: return flowclass @staticmethod def _extract_additional_actions (splitted_actions): """ Interprets the action field of a flowrule: every action is considered additional, except the ones used for traffic steering such as the "TAG", "UNTAG" and "output" actions. Returns the string to be put into the additional_actions field. :param splitted_actions: elements of the action fields :type splitted_actions: list :return: additional actions :rtype: str """ additional_actions = "" for action in splitted_actions: action_2_list = action.split("=", 1) field = action_2_list[0] mparam = "" if len(action_2_list) == 2: mparam = action_2_list[1] if field != "UNTAG" and field != "output" and field != "TAG": # if there is at least one additional action already, they should be # separated by ";"-s. if additional_actions != "": additional_actions += ";" additional_actions += field if mparam == "" else \ "".join((field, "=", mparam)) if additional_actions == "": return None else: return additional_actions @staticmethod def _get_flowrule_and_its_starting_port (infra, fr_id): """ Finds the Flowrule which belongs to the path of SGHop with ID 'fr_id'. :param infra: Infra object where we should look for the Flowrule :type infra: :any:`NodeInfra` :param fr_id: Flowrule/SGHop ID to look for :type fr_id: int :return: Flowrule and its containing InfraPort :rtype: tuple """ for p in infra.ports: for fr in p.flowrules: if fr.id == fr_id: return fr, p else: return None, None @staticmethod def get_inport_of_flowrule (infra, fr_id): """ Finds the InfraPort which contains the Flowrule belonging to the path of SGHop with ID 'fr_id'. :param infra: Infra object where we should look for the Flowrule :type infra: :any:`NodeInfra` :param fr_id: Flowrule/SGHop ID to look for :type fr_id: int :return: the InfraPort containing the Flowrule :rtype: :any:`InfraPort` """ for p in infra.ports: for fr in p.flowrules: if fr.id == fr_id: return p else: raise RuntimeError("Couldn't find Flowrule for SGHop %s in Infra %s!" % (fr_id, infra.id)) @staticmethod def get_output_port_of_flowrule (infra, fr): """ Find the port object where this Flowrule sends the traffic out. :param infra: Infra object where we should look for the InfraPort. :type infra: :any:`NodeInfra` :param fr: flowrule object :type fr: :class:`Flowrule` :return: The output infra port. :rtype: :any:`InfraPort` """ for action in fr.action.split(";"): comm, arg = action.split("=", 1) if comm == 'output': if "://" in arg: # target-less flow rule -> skip return arg = NFFGToolBox.try_to_convert(arg) return infra.ports[arg] else: raise RuntimeError("Couldn't find output InfraPort object for Flowrule %s" " in Infra %s!"
% (fr.id, infra.id)) @staticmethod def _check_flow_consistency (sg_map, fr_sg): """ Checks whether there is an inconsistency with Flowrule or SGHop 'fr_sg' and the other flowrules which are part of the SGHop's sequence OR SGHop which is in sg_map. Throws runtime exception if error found. Uses only the common fields of Flowrules and SGHops. 'flowclass' needs to be extracted if 'fr_sg' is not an SGHop. :param sg_map: SGHop sequence :type sg_map: dict :param fr_sg: checked flowentry or SGhop :type fr_sg: :class:`Flowrule` or :class:`EdgeSGLink` :return: None """ if isinstance(fr_sg, Flowrule): flowclass = NFFGToolBox._extract_flowclass(fr_sg.match.split(";")) else: flowclass = fr_sg.flowclass consistent = True if sg_map[fr_sg.id][2] != flowclass: consistent = False if (sg_map[fr_sg.id][3] is None or sg_map[fr_sg.id][3] == float("inf")) != \ (fr_sg.bandwidth is None or fr_sg.bandwidth == float("inf")): # If not both of them are None consistent = False elif (sg_map[fr_sg.id][3] is not None) and (fr_sg.bandwidth is not None): if consistent and math.fabs(sg_map[fr_sg.id][3] - fr_sg.bandwidth) > 1e-8: consistent = False if (sg_map[fr_sg.id][4] is None or sg_map[fr_sg.id][4] == 0.000000000) != \ (fr_sg.delay is None or fr_sg.delay == 0.0000000000): # If not both of them are None consistent = False elif (sg_map[fr_sg.id][4] is not None) and (fr_sg.delay is not None): if math.fabs(sg_map[fr_sg.id][4] - fr_sg.delay) > 1e-8: consistent = False if not consistent: raise RuntimeError("Not all data of a Flowrule equal to the other " "Flowrules of the sequence for the SGHop %s! Or the" " SGHop to be added differs in data from the existing" " SGHop!" % fr_sg.id) @staticmethod def get_all_sghop_info (nffg, return_paths=False, log=logging.getLogger("SG-RECREATE")): """ Returns a dictionary keyed by sghopid, data is [PortObjsrc, PortObjdst, SGHop.flowclass, SGHop.bandwidth, SGHop.delay, SGHop.constraints, SGHop.additional_action] list of port objects. Source and destination VNF-s can be retrieved from port references (port.node.id). The function 'recreate_all_sghops' should receive this exact NFFG object and the output of this function. It is based exclusively on flowrules, flowrule ID-s are equal to the corresponding SGHop's ID. If return_paths is set, the last element in the dict values is always an unordered list of the STATIC link references, which are used by the flowrule sequence. Doesn't change the input NFFG, only returns the SGHop values, SGHops are not added. :param nffg: the processed NFFG object :type nffg: :class:`NFFG` :param return_paths: flag for returning paths :type return_paths: bool :return: extracted values :rtype: dict """ class MissingFlowruleEndingPort(Exception): pass sg_map = {} for i in nffg.infras: for p in i.ports: for fr in p.flowrules: try: # if fr.external: # continue if fr.id not in sg_map: # The path is unordered!! path_of_shop = [] flowclass = NFFGToolBox._extract_flowclass(fr.match.split(";")) sg_map[fr.id] = [None, None, flowclass, fr.bandwidth, fr.delay, fr.constraints, None] # We have to find the BEGINNING of this flowrule sequence. inbound_link = NFFGToolBox._find_infra_link(nffg, p, outbound=False, accept_dyn=True) while inbound_link.type != 'DYNAMIC': path_of_shop.append(inbound_link) if inbound_link.src.node.type == 'SAP': break # The link is STATIC, and its src is not SAP so it is an Infra. 
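# Look up the flowrule with the same SGHop ID on the previous Infra node and continue walking backwards from the port which contains it.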
prev_fr, prev_p = \ NFFGToolBox._get_flowrule_and_its_starting_port( inbound_link.src.node, fr.id) if prev_fr == None: raise MissingFlowruleEndingPort() NFFGToolBox._check_flow_consistency(sg_map, prev_fr) inbound_link = NFFGToolBox._find_infra_link(nffg, prev_p, outbound=False, accept_dyn=True) # 'inbound_link' is DYNAMIC here or it is STATIC and starts from # a SAP, # so the sequence starts here sg_map[fr.id][0] = inbound_link.src # We have to find the ENDING of this flowrule sequence. output_port = NFFGToolBox.get_output_port_of_flowrule(i, fr) if output_port is None: continue outbound_link = NFFGToolBox._find_infra_link(nffg, output_port, outbound=True, accept_dyn=True) while outbound_link.type != 'DYNAMIC': path_of_shop.append(outbound_link) if outbound_link.dst.node.type == 'SAP': break # The link is STATIC and its dst is not a SAP so it is an Infra. next_fr, _ = NFFGToolBox._get_flowrule_and_its_starting_port( outbound_link.dst.node, fr.id) if next_fr == None: raise MissingFlowruleEndingPort() # '_' is 'outbound_link.dst' next_output_port = NFFGToolBox.get_output_port_of_flowrule( outbound_link.dst.node, next_fr) NFFGToolBox._check_flow_consistency(sg_map, next_fr) outbound_link = NFFGToolBox._find_infra_link(nffg, next_output_port, outbound=True, accept_dyn=True) # the 'outbound_link' is DYNAMIC here or finishes in a SAP, so the # flowrule sequence finished here. sg_map[fr.id][1] = outbound_link.dst # the additional action is only present in the last flowrule of # the flowrule sequence. for last_fr in nffg.network.node[outbound_link.src.node.id]. \ flowrules(): # we need to retrieve this last flowrule if last_fr.id == fr.id: # extract the additional action if there is any additional_action = NFFGToolBox._extract_additional_actions( last_fr.action.split(";")) sg_map[fr.id][6] = additional_action break if return_paths: sg_map[fr.id].append(path_of_shop) except MissingFlowruleEndingPort: del sg_map[fr.id] log.warn("Couldn't find Flowrule for SGHop %s in Infra %s!" % (fr.id, i.id)) return sg_map @staticmethod def recreate_all_sghops (nffg): """ Extracts the SGHop information from the input NFFG, and creates the SGHop objects in the NFFG. :param nffg: the NFFG to look for SGHop info and to modify :type nffg: :class:`NFFG` :return: the modified NFFG :rtype: :class:`NFFG` """ sg_map = NFFGToolBox.get_all_sghop_info(nffg) for sg_hop_id, data in sg_map.iteritems(): src, dst, flowclass, bandwidth, delay, constraints, \ additional_actions = data if not (src and dst): continue if not nffg.network.has_edge(src.node.id, dst.node.id, key=sg_hop_id): nffg.add_sglink(src, dst, id=sg_hop_id, flowclass=flowclass, bandwidth=bandwidth, delay=delay, constraints=constraints, additional_actions=additional_actions) # causes unnecessary failures, when bandwidth or delay is missing # somewhere # else: # sg_hop = nffg.network[src.node.id][dst.node.id][sg_hop_id] # NFFGToolBox._check_flow_consistencity(sg_map, sg_hop) return nffg @staticmethod def retrieve_and_purge_all_tag_info (nffg): """ Searches all match fields of all flowrules for some possible tag values, which may come from inter domain traffic steering and may use technology specific tagging for a neighbouring domain. This info is gathered for all incoming flowrule sequences and returned in a dictionary keyed by the flowrule ID/SGHop ID. The gathered tags are deleted from all encountered flowrules (thus the input NFFG is modified). Tag info is also gathered from SGHops to the dictionary and consistency is checked if needed. 
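Currently the ``dl_vlan`` and ``mpls_label`` match fields are treated as possible technology specific tags.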
:param nffg: base NFFG :type nffg: :class:`NFFG` :return: dict indexed by flowrule ID. :rtype: dict """ # the set of tags which shall be considered. Possibly needed to modify! possible_tag_infos = ("dl_vlan", "mpls_label") # WARNING: we can't differentiate ethertypes based only on the presence of # mpls_label, other fields should be processed too. But only one layer of # MLPS is supported in OVS currently, whose format we use in Flowrules untag_actions = ("strip_vlan", "pop_mpls:0x8847") tag_info_all_sghops = {} for infra in nffg.infras: for fr in infra.flowrules(): for match_element in fr.match.split(";"): match_element_splitted = match_element.split("=") if len(match_element_splitted) == 2: if match_element_splitted[0] in possible_tag_infos: if fr.id not in tag_info_all_sghops: # save the tag_info tag_info_all_sghops[fr.id] = match_element elif tag_info_all_sghops[fr.id] != match_element: # we have found another flowrule which has a different # possible_tag_info. raise RuntimeError( "The flowrule sequence of flowrule %s in infra %s has " "multiple match fields " "which may be used for interdomain traffic steering (" "%s) so it cannot be decided which one to use." % (fr, infra.id, possible_tag_infos)) # delete this match element from the match of the flowrule # and the ; separators too, in case they are left on the # beginning or ending. # we can delete this tag info from other flowrules of the same # flowrule sequence too, because the abstract tag will be used # during the mapping. fr.match = fr.match.replace(match_element, ""). \ rstrip(";").lstrip(";") # we need to gather tag_info-s from SGHops too, if flowrules are # not present, but SGHops are. If both are present, check consistency # between them. for sg in nffg.sg_hops: if sg.tag_info is not None: if sg.id in tag_info_all_sghops: if tag_info_all_sghops[sg.id] != sg.tag_info: raise RuntimeError( "Technology specific interdomain tag info is " "inconsistent in SGHop %s tag value: %s and " "one of its flowrules with tag value %s" % (sg, sg.tag_info, tag_info_all_sghops[sg.id])) else: # add the SGHop's tag_info to the dictionary for later usage. tag_info_all_sghops[sg.id] = sg.tag_info # we need to check whether any tag_info is already included in flowclass # field, if so, we need to delete it, because from now on, we take care # of this field of the match. if sg.flowclass is not None and sg.id in tag_info_all_sghops: sg.flowclass = sg.flowclass.replace(tag_info_all_sghops[sg.id], "").rstrip(";").lstrip(";") # if the flowclass disappears, let's set it back to None if sg.flowclass == "": sg.flowclass = None # we need to add the corresponding untag actions for every tag_info field. for sg in nffg.sg_hops: if sg.id in tag_info_all_sghops: for tag_info, untag_action in zip(possible_tag_infos, untag_actions): if tag_info in tag_info_all_sghops[sg.id]: tag_info_all_sghops[sg.id] = ( tag_info_all_sghops[sg.id], untag_action) # delete the possibly present untag action from the additional # actions, from now on, we take care of that. if sg.additional_actions is not None: sg.additional_actions = sg.additional_actions. \ replace(untag_action, "").rstrip(";").lstrip(";") # if there are no additional actions left, change it back to None if sg.additional_actions == "": sg.additional_actions = None return tag_info_all_sghops @staticmethod def redirect_flowrules (from_port, to_port, infra, mark_external=False, log=logging.getLogger("MOVE")): """ Redirect flowrules from `from` to `to_port` handling match/action fields. 
:param from_port: origin port :type from_port: :class:`InfraPort` :param to_port: target port :type to_port: :class:`InfraPort` :param infra: container node :type infra: :class:`NodeInfra` :param mark_external: mark redirected flowrule as external :type mark_external: bool :param log: additional logger :type log: :any:`logging.Logger` :return: None """ # Flowrules pointing to the from_port -> rewrite output reference in action for port in infra.ports: for fr in port.flowrules: output = fr.action.split(';', 1)[0].split('=', 1)[1] try: output = int(output) except ValueError: pass if output == from_port.id: # Rewrite output tag fr.action = fr.action.replace("output=%s" % output, "output=%s" % to_port.id, 1) if mark_external: fr.external = True log.debug("Rewritten inbound flowrule: %s" % fr) # Contained flowrules need to be rewritten and moved to the target port for fr in from_port.flowrules: # Rewrite in_port tag fr.match = fr.match.replace(fr.match.split(';', 1)[0], "in_port=%s" % to_port.id, 1) if mark_external: fr.external = True # Move flowrule to_port.flowrules.append(fr) log.debug("Moved outbound flowrule: %s" % fr) # Clear flowrule list del from_port.flowrules[:] @classmethod def merge_external_ports (cls, nffg, log=logging.getLogger("MERGE")): """ Merge detected external ports in nodes of given `nffg` and only leave the original SAP port. :param nffg: container node :type nffg: :class:`NFFG` :param log: additional logger :type log: :any:`logging.Logger` :return: None """ for infra in nffg.infras: for ext_port in [p for p in infra.ports if p.role == "EXTERNAL"]: log.debug("Found external port: %s" % ext_port) # Collect ports with the same SAP tag origin_port = [p for p in infra.ports if p.sap == ext_port.sap and p.role != "EXTERNAL"] if len(origin_port) != 1: log.error("Original port for external port: %s is not found uniquely:" " %s" % (ext_port, origin_port)) continue origin_port = origin_port.pop() log.debug("Detected original port for %s -> %s" % (ext_port.id, origin_port)) # Move flowrules log.debug("Redirect external port %s traffic into %s..." % (ext_port, origin_port)) cls.redirect_flowrules(from_port=ext_port, to_port=origin_port, infra=infra, mark_external=True, log=log) # Remove external port log.debug("Remove external SAP: %s" % ext_port.id) nffg.del_node(node=nffg[ext_port.id]) infra.ports.remove(ext_port) @classmethod def isStaticInfraPort (cls, G, p): """ Return true if there is a Static outbound or inbound EdgeLink, false if there is a Dynamic outbound or inbound link, throws exception if both, or warning if multiple of the same type. 
:param G: raw networkx graph object :type G: :class:`MultiDiGraph` :param p: port object :type p: :class:`Port` :return: whether the checked port is static Infra Port :rtype: bool """ static_link_found = False dynamic_link_found = False for edge_func, src_or_dst in ((G.out_edges_iter, 'src'), (G.in_edges_iter, 'dst')): for i, j, k, link in edge_func([p.node.id], data=True, keys=True): src_or_dst_port = getattr(link, src_or_dst) # check if we have found the right port if src_or_dst_port.id == p.id: if link.type == NFFG.TYPE_LINK_DYNAMIC: dynamic_link_found = True elif link.type == NFFG.TYPE_LINK_STATIC: static_link_found = True if dynamic_link_found and static_link_found: raise RuntimeError( "An InfraPort should either be connected to STATIC or DYNAMIC links " "Both STATIC and DYNAMIC in/outbound links found to port %s of Infra " "%s" % (p.id, p.node.id)) elif not dynamic_link_found and not static_link_found: # If a port is found which is not connected to any STATIC or DYNAMIC link return False elif static_link_found: return True elif dynamic_link_found: return False @classmethod def explodeGraphWithPortnodes (cls, G, id_connector_character='&'): """ Makes ports of the original graph into the nodes of a new NetworkX graph, adds delay values onto edge data. The returned graph can be used by standard networkx algorithms. WARNING: if called with a G, which has parallel nodes, the link data will be overridden with one of the parallel links. :param id_connector_character: character which is used to concatenate and separate port IDs from/to node IDs :type id_connector_character: str :param G: raw networkx graph object :type G: :class:`DiGraph` :return: created graph object :rtype: :class:`DiGraph` """ exploded_G = nx.DiGraph() for id, obj in G.nodes_iter(data=True): if obj.type == NFFG.TYPE_INFRA: static_ports_of_infra = filter( lambda pp, graph=G: NFFGToolBox.isStaticInfraPort(G, pp), obj.ports) # NOTE: obj.id == p.node.id because of iterating on obj.ports static_ports_of_infra_global_ids = map( lambda pp, c=id_connector_character: id_connector_character.join( (str(pp.id), str(pp.node.id))), static_ports_of_infra) exploded_G.add_nodes_from(static_ports_of_infra_global_ids) # all of them should already have the weight set to non negative float bandwidth_based_node_weight = obj.weight if hasattr(obj, 'weight') \ else 0.0 if type(obj.resources.delay) == type(dict): # delay is dict of dicts storing the directed distances between ports for port1, distances in obj.resources.delay.iteritems(): for port2, dist in distances.iteritems(): exploded_G.add_edge( id_connector_character.join((str(port1), obj.id)), id_connector_character.join((str(port2), obj.id)), attr_dict={'delay': dist, 'weight': bandwidth_based_node_weight}) else: # support filling the delay matrix even if the node has only a single # delay value, for partial backward compatibility and convenience universal_node_delay = obj.resources.delay if obj.resources.delay \ is not None else 0.0 for i in static_ports_of_infra_global_ids: for j in static_ports_of_infra_global_ids: if i != j: exploded_G.add_edge(i, j, attr_dict={'delay': universal_node_delay, 'weight': bandwidth_based_node_weight}) elif obj.type == NFFG.TYPE_SAP: sap_port_found = False for p in obj.ports: if not sap_port_found: exploded_G.add_node( id_connector_character.join((str(p.id), p.node.id))) else: exploded_G.add_node( id_connector_character.join((str(p.id), p.node.id))) # TODO: In this case multiple nodes in the exploded graph shuold be # connected with 0 delay links! 
# log.warn("Multiple ports found in SAP object!") # all ports are added as nodes, and the links between the ports denoting the # shortest paths inside the infra node are added already. # Add links connecting infra nodes and SAPs for i, j, link in G.edges_iter(data=True): if link.type == NFFG.TYPE_LINK_STATIC: # if a link delay is None, we should take it as 0ms delay. link_delay = link.delay if link.delay is not None else 0.0 link_weight = link.weight if hasattr(link, 'weight') else 0.0 exploded_G.add_edge( id_connector_character.join((str(link.src.id), str(i))), id_connector_character.join((str(link.dst.id), str(j))), attr_dict={'delay': link_delay, 'weight': link_weight, 'static_link_id': link.id}) return exploded_G @classmethod def addOriginalNodesToExplodedGraph (cls, sources, destinations, exploded_G, id_connector_character='&'): """ Modifies the exploded_G to add original nodes from G, and connects them with zero weighted and delayed links to all corresponding exploded p ort nodes. Elements of 'sources' are towards the graph and elements of 'destinations' are towards the original nodes. This is needed so we could calculate paths from an Infra node, without needing to decide which outbound port we want to use. :param G: :param exploded_G: :param id_connector_character: :return: """ # exploded_G.add_nodes_from(sources) # exploded_G.add_nodes_from(destinations) for i in exploded_G.nodes(): # if id_connector_character in i: original_node_id = NFFGToolBox.try_to_convert( i.split(id_connector_character)[1]) # the add_edge function adds the node if that is not there yet if original_node_id in sources: exploded_G.add_edge(original_node_id, i, attr_dict={'delay': 0, 'weight': 0}) elif original_node_id in destinations: exploded_G.add_edge(i, original_node_id, attr_dict={'delay': 0, 'weight': 0}) return exploded_G @classmethod def purgeExplodedGraphFromOriginalNodes (cls, G, exploded_G, id_connector_character='&'): """ Deletes all original nodes from the exploded graph and all of its connected edges to gain back the pure exploded graph without original nodes. :param G: :param exploded_G: :param id_connector_character: :return: """ for i in exploded_G.nodes(): if type(i) == str and id_connector_character in i: i = NFFGToolBox.try_to_convert(i.split(id_connector_character)[1]) if i in G and i in exploded_G: # removes all connected edges as well exploded_G.remove_node(i) return exploded_G @classmethod def extractDistsFromExploded (cls, G, exploded_dists, id_connector_character='&'): """ Extracts the shortest path length matrix from the calculation result on the exploded graph structure. :param G: raw networkx graph object :type G: :class:`DiGraph` :param exploded_dists: exploded graph structure :type exploded_dists: dict :param id_connector_character: character which is used to concatenate and separate port IDs from/to node IDs :type id_connector_character: str :return: shortest path length matrix in 2 dict :rtype: tuple """ dist = defaultdict(lambda: defaultdict(lambda: float('inf'))) min_dist_pairs = defaultdict(lambda: defaultdict(lambda: None)) for u, obju in G.nodes_iter(data=True): # SAPs and Infras are handled the same at this point. 
if obju.type == NFFG.TYPE_INFRA or obju.type == NFFG.TYPE_SAP: # a list of (global_port_id, dist_dict) tuples possible_dicts = filter( lambda tup, original_id=u, sep=id_connector_character: original_id == NFFGToolBox.try_to_convert(tup[0].split(sep)[1]), exploded_dists.iteritems()) for v, objv in G.nodes_iter(data=True): if objv.type == NFFG.TYPE_INFRA or objv.type == NFFG.TYPE_SAP: possible_ending_nodes = filter( lambda portid, original_id=v, sep=id_connector_character: original_id == NFFGToolBox.try_to_convert(portid.split(sep)[1]), exploded_dists.iterkeys()) # now we need to choose the minimum of the possible distances. for starting_node, d in possible_dicts: for ending_node in possible_ending_nodes: if ending_node in d: if d[ending_node] < dist[NFFGToolBox.try_to_convert(u)][ NFFGToolBox.try_to_convert(v)]: dist[NFFGToolBox.try_to_convert(u)][ NFFGToolBox.try_to_convert(v)] = d[ending_node] min_dist_pairs[u][v] = (starting_node, ending_node) # convert default dicts to dicts for safety reasons for k in dist: dist[k] = dict(dist[k]) for k in min_dist_pairs: min_dist_pairs[k] = dict(min_dist_pairs[k]) return dict(dist), dict(min_dist_pairs) @classmethod def extractPathsFromExploded (cls, exploded_paths_dict, min_dist_pairs, id_connector_character='&'): """ Extracts and transforms paths from the matrix of shortest paths calculated on the exploded graph structure. :param exploded_paths_dict: exploded paths :type exploded_paths_dict: dict :param min_dist_pairs: minimal distance pairs :type min_dist_pairs: :param id_connector_character: character which is used to concatenate and separate port IDs from/to node IDs :type id_connector_character: str :return: extracted paths :rtype: dict """ min_length_paths = defaultdict(lambda: defaultdict(lambda: None)) for original_starting_node, d in min_dist_pairs.iteritems(): for original_ending_node, tup in d.iteritems(): exploded_path = exploded_paths_dict[tup[0]][tup[1]] # get only the exploded IDs, which come from node ID-s path_with_only_node_ids = filter( lambda lid, sep=id_connector_character: sep in lid, exploded_path) # transform them back to the original ID-s path_with_original_node_ids = map( lambda lid, sep=id_connector_character: lid.split(sep)[1], path_with_only_node_ids) # the startgin and ending node ID may not be in place if path_with_original_node_ids[0] != original_starting_node: path_with_original_node_ids.insert(0, original_starting_node) if path_with_original_node_ids[-1] != original_ending_node: path_with_original_node_ids.append(original_ending_node) # a transit infra appears twice in the path after each other, because # there was an inbound and an outbound port. path_with_original_node_ids_no_duplicates = [ path_with_original_node_ids[0]] for n in path_with_original_node_ids: if n != path_with_original_node_ids_no_duplicates[-1]: path_with_original_node_ids_no_duplicates.append(n) path_with_original_node_ids_no_duplicates_str = map( lambda node_id: NFFGToolBox.try_to_convert(node_id), path_with_original_node_ids_no_duplicates) min_length_paths[original_starting_node][original_ending_node] = \ path_with_original_node_ids_no_duplicates_str # convert embedded default dicts for k in min_length_paths: min_length_paths[k] = dict(min_length_paths[k]) return dict(min_length_paths) @classmethod def extractPathsLinkIDsFromExplodedPath (cls, exploded_G, exploded_paths_list, id_connector_character='&'): """ Extracts the static link ID-s of the given paths based on the exploded graph. 
Assumes that the exploded_G generation added a 'static_link_id' attribute to the exploded versions of static links and the paths were calculated on the exploded graph where the original nodes are added. :param exploded_G: :param exploded_paths_list: :param id_connector_character: :return: list of link and node ids, preserving the order in exploded_paths """ extracted_paths_list = [] extracted_path_linkids_list = [] for exploded_path in exploded_paths_list: extracted_path = [] extracted_path_linkids = [] # the path must start from an original node! last_node = exploded_path[0] # integer node IDs must be converted if possible. extracted_path.append(NFFGToolBox.try_to_convert(last_node)) for node in exploded_path[1:]: if id_connector_character not in node and node != exploded_path[-1]: raise RuntimeError("Inner elements of the exploded path must contain " "the ID connector character (%s), but the path " "is %s" % (id_connector_character, exploded_path)) elif node != exploded_path[-1]: # integer node IDs must be converted if possible. original_node_id = NFFGToolBox.try_to_convert( node.split(id_connector_character)[1]) if original_node_id != extracted_path[-1]: # this graph must have such a link, otherwise there wouldn't be a # path if 'static_link_id' in exploded_G[last_node][node]: extracted_path.append(original_node_id) extracted_path_linkids.append(NFFGToolBox.try_to_convert( exploded_G[last_node][node]['static_link_id'])) else: # The last node is added by the exploded path's one-before-last # element, so this branch would be skipped anyway to avoid duplicating # the last node element pass # last node must be valid in the exploded_G last_node = node extracted_paths_list.append(extracted_path) extracted_path_linkids_list.append(extracted_path_linkids) return extracted_paths_list, extracted_path_linkids_list @classmethod def shortestPathsInLatency (cls, G, return_paths=False, exploded_G=None, id_connector_character='&'): """ Calculates shortest pased considering latencies between Infra node ports. Uses only the infrastructure part of an NFFG, non Infra nodes doesn't have internal forwarding latencies. :param G: raw networkx graph object :type G: :class:`DiGraph` :param return_paths: whether return with path :type return_paths: bool :param id_connector_character: character which is used to concatenate and separate port IDs from/to node IDs :type id_connector_character: str :return: shortest path and optionally the extracted path :rtype: dict or tuple """ if exploded_G is None: exploded_G = NFFGToolBox.explodeGraphWithPortnodes(G, id_connector_character) exploded_dists = nx.all_pairs_dijkstra_path_length(exploded_G, weight='delay') dists, min_dist_pairs = NFFGToolBox.extractDistsFromExploded(G, exploded_dists, id_connector_character) if return_paths: exploded_paths = nx.all_pairs_dijkstra_path(exploded_G, weight='delay') paths = NFFGToolBox.extractPathsFromExploded(exploded_paths, min_dist_pairs, id_connector_character) return paths, dists else: return dists @staticmethod def strip_nfs_flowrules_sghops_ports (nffg, log): """ Makes a bare NFFG object from the input. 
:param nffg: :param log: :return: """ # This removes most of the SGHops as well for nf_id in [n.id for n in nffg.nfs]: nffg.del_node(nf_id) # Remove the remaining SGHops for sgh in [sg for sg in nffg.sg_hops]: nffg.del_edge(sgh.src, sgh.dst, id=sgh.id) # Remove possible edge_reqs for req in [r for r in nffg.reqs]: nffg.del_edge(req.src, req.dst, id=req.id) # Clear all flowrules for infra in nffg.infras: for p in infra.ports: p.clear_flowrules() port_deleted = False try: NFFGToolBox._find_infra_link(nffg, p, True, True) except RuntimeError as re: log.warn( "InfraPort of %s may not have in/outbound link " "connected to it, message: %s" % (infra.id, re.message)) infra.del_port(p.id) port_deleted = True if not port_deleted: try: NFFGToolBox._find_infra_link(nffg, p, False, True) except RuntimeError as re: log.warn( "InfraPort of %s may not have in/outbound link " "connected to it, message: %s" % (infra.id, re.message)) infra.del_port(p.id) return nffg
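# --- Illustrative sketch (not part of NFFGToolBox) ---
# A minimal, self-contained version of the idea behind
# explodeGraphWithPortnodes()/shortestPathsInLatency(): ports become nodes of a
# plain DiGraph, intra-node forwarding delays and inter-node link delays become
# 'delay' edge attributes, and standard Dijkstra then yields port-to-port
# latencies. Node names and delay values below are made up.
import networkx as nx

demo = nx.DiGraph()
demo.add_edge('in&A', 'out&A', delay=0.5)   # internal forwarding delay of node A
demo.add_edge('in&B', 'out&B', delay=0.2)   # internal forwarding delay of node B
demo.add_edge('out&A', 'in&B', delay=3.0)   # static link A -> B

dists = dict(nx.all_pairs_dijkstra_path_length(demo, weight='delay'))
print(dists['in&A']['out&B'])   # 3.7 = 0.5 + 3.0 + 0.2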
Watch Guy's Grocery Games Season 20 Episode 7 online. The best free online streaming options for the Guy's Grocery Games TV show. We picked the best sites to stream s20e07. Simply pick a site below and click the "Watch Now!" button next to it. Some of the links may be broken; please upvote the working links so that other users see them at the top of the list for Guy's Grocery Games Season 20 Episode 7 (s20e07).
#!/usr/bin/env python import mapnik, ogr, osr, pyproj, os, sys, getopt from PIL import Image ### # Draw a Rhumb line with nPoints nodes # @author jonnyhuck ### def getRhumb(startlong, startlat, endlong, endlat, nPoints): # calculate distance between points g = pyproj.Geod(ellps='WGS84') # calculate line string along path with segments <= 1 km lonlats = g.npts(startlong, startlat, endlong, endlat, nPoints) # npts doesn't include start/end points, so prepend/append them and return lonlats.insert(0, (startlong, startlat)) lonlats.append((endlong, endlat)) return lonlats ### # Write a geometry to a Shapefile # @author jonnyhuck ### def makeShapefile(geom, name, layer_name): # set up the shapefile driver driver = ogr.GetDriverByName("ESRI Shapefile") # remove old shapefile if required if os.path.exists(name): driver.DeleteDataSource(name) # create the data source data_source = driver.CreateDataSource(name) # create the spatial reference, WGS84 srs = osr.SpatialReference() srs.ImportFromEPSG(4326) # create the layer layer = data_source.CreateLayer(layer_name, srs, ogr.wkbPolygon) # create the feature feature = ogr.Feature(layer.GetLayerDefn()) # Set the feature geometry using the point feature.SetGeometry(geom) # Create the feature in the layer (shapefile) layer.CreateFeature(feature) # Destroy the feature to free resources feature.Destroy() # Destroy the data source to free resources data_source.Destroy() ### # Make a single Gore # @author jonnyhuck ### def makeGore(central_meridian, gore_width, number, width, gore_stroke): # WGS84 source = osr.SpatialReference() source.ImportFromEPSG(4326) # Spherical Sinusoidal original = osr.SpatialReference() original.ImportFromProj4("+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371000 +b=6371000 +units=m +no_defs ") # Spherical Sinusoidal with gore-specific central meridian target = osr.SpatialReference() target.ImportFromProj4('+proj=sinu +lon_0=' + str(central_meridian) + ' +x_0=0 +y_0=0 +a=6371000 +b=6371000 +units=m +no_defs') # get the main points of the area of interest and transform halfWidth = gore_width / 2 mainPoints = ogr.Geometry(ogr.wkbLinearRing) mainPoints.AddPoint(central_meridian, 90) mainPoints.AddPoint(central_meridian - halfWidth, 0) mainPoints.AddPoint(central_meridian, -90) mainPoints.AddPoint(central_meridian + halfWidth, 0) # make the gore (using mainPoints in their wgs84 form) gore = getRhumb(mainPoints.GetX(1), mainPoints.GetY(0), mainPoints.GetX(1), mainPoints.GetY(2), 100) # get the first rhumb (N-S) gore2 = getRhumb(mainPoints.GetX(3), mainPoints.GetY(2), mainPoints.GetX(3), mainPoints.GetY(0), 100) # get the second rhumb (S-N) gore.extend(gore2) # combine them into one # create ring for the gore ring = ogr.Geometry(ogr.wkbLinearRing) for p in gore: ring.AddPoint(p[0], p[1]) # if invalid, do something more elegant than the fix below # if ring.IsValid() == False: # create polygon for the gore clipper = ogr.Geometry(ogr.wkbPolygon) clipper.AddGeometry(ring) clipper.CloseRings() # print clipper.ExportToJson() # write to shapefile makeShapefile(clipper, "tmp/tmp_gore" + str(number) + ".shp", "gore") # open countries file and get all of the geometry shapefile = "ne_110m_land/ne_110m_land.shp" driver = ogr.GetDriverByName("ESRI Shapefile") dataSource = driver.Open(shapefile, 0) layer = dataSource.GetLayer() land = ogr.Geometry(ogr.wkbGeometryCollection) for feature in layer: land.AddGeometry(feature.GetGeometryRef()) # clip against the gore landPanel = clipper.Intersection(land) # write to shapefile makeShapefile(landPanel, "tmp/tmp_land" + 
str(number) + ".shp", "land") # clean up clipper.Destroy() landPanel.Destroy() # make bounding box for the output transform = osr.CoordinateTransformation(source, original) # points for the bounding box bbPoints = ogr.Geometry(ogr.wkbLinearRing) bbPoints.AddPoint(0, 90) bbPoints.AddPoint(-halfWidth, 0) bbPoints.AddPoint(0, -90) bbPoints.AddPoint(halfWidth, 0) bbPoints.Transform(transform) # make the map map = mapnik.Map(width, width) map.srs = target.ExportToProj4() map.background = mapnik.Color('#ffffff') # add and style gore s = mapnik.Style() r = mapnik.Rule() polygon_symbolizer = mapnik.PolygonSymbolizer(mapnik.Color('#000000')) r.symbols.append(polygon_symbolizer) s.rules.append(r) map.append_style('land_style',s) ds = mapnik.Shapefile(file="./tmp/tmp_land" + str(number) + ".shp") land = mapnik.Layer('land') land.datasource = ds land.styles.append('land_style') map.layers.append(land) # add and style gore s = mapnik.Style() r = mapnik.Rule() line_symbolizer = mapnik.LineSymbolizer(mapnik.Color('#000000'), gore_stroke) r.symbols.append(line_symbolizer) s.rules.append(r) map.append_style('gore_style',s) ds = mapnik.Shapefile(file="./tmp/tmp_gore" + str(number) + ".shp") gore = mapnik.Layer('gore') gore.datasource = ds gore.styles.append('gore_style') map.layers.append(gore) # this grows the image if the map dimensions do not fit the canvas dimensions map.aspect_fix_mode = mapnik.aspect_fix_mode.GROW_CANVAS # Set the extent (need to set this to around 0 post transformation as this is the central meridian) map.zoom_to_box(mapnik.Envelope(bbPoints.GetX(1), bbPoints.GetY(0), bbPoints.GetX(3), bbPoints.GetY(2))) # render to file (and show me it) mapnik.render_to_file(map, "tmp/gore" + str(number) + ".png") ## # Main Function # @author jonnyhuck ## def main(argv): # make sure the tmp folder exists if not os.path.exists("tmp"): os.makedirs("tmp") # set defaults GORE_WIDTH_PX = 500 GORE_WIDTH_DEG = 60 OUT_PATH = "globe.png" GORE_OUTLINE_WIDTH = 4 # read in arguments try: opts, args = getopt.getopt(argv, "hp:d:g:o:") except getopt.GetoptError: print 'python makeGlobe.py -p [GORE_WIDTH_PX] -d [GORE_WIDTH_DEGREES] -g [GORE_OUTLINE_WIDTH] -o [OUT_PATH]' sys.exit(2) for opt, arg in opts: if opt == '-h': print 'python makeGlobe.py -p [GORE_WIDTH_PX] -d [GORE_WIDTH_DEGREES] -g [GORE_OUTLINE_WIDTH] -o [OUT_PATH]' sys.exit() elif opt == '-p': GORE_WIDTH_PX = int(arg) elif opt == '-d': GORE_WIDTH_DEG = int(arg) elif opt == '-g': GORE_OUTLINE_WIDTH = int(arg) elif opt == '-o': OUT_PATH = arg # verify values if GORE_WIDTH_PX < 0: print "invalid -p (GORE_WIDTH_PX) value: " + str(GORE_WIDTH_PX) print "GORE_WIDTH_DEG must be >0." sys.exit(0) if GORE_WIDTH_DEG < 15 or GORE_WIDTH_DEG > 120 or 360 % GORE_WIDTH_DEG > 0: print "invalid -d (GORE_WIDTH_DEG) value: " + str(GORE_WIDTH_PX) print "GORE_WIDTH_DEG must be >=15, <=120 and multiply into 360." print "Valid numbers include: 120, 90, 60, 30, 20, 15" sys.exit(0) # how many gores? I = 360 / GORE_WIDTH_DEG # make a test gore to see how big it is makeGore(0, GORE_WIDTH_DEG, 666, GORE_WIDTH_PX, 0) im666 = Image.open("tmp/gore666.png") w,h = im666.size # make 6 gores and join them together into a single image # TODO: HOW CAN I WORK OUT 1497? 
im = Image.new("RGB", (GORE_WIDTH_PX * I, h), "white") for i in range(0, I): cm = -180 + (GORE_WIDTH_DEG/2) + (GORE_WIDTH_DEG * i) # blunt fix - stops data wrapping around the world if i == I-1: cm -= 0.01 print cm makeGore(cm, GORE_WIDTH_DEG, i, GORE_WIDTH_PX, GORE_OUTLINE_WIDTH) im1 = Image.open("tmp/gore" + str(i) + ".png") im.paste(im1, (GORE_WIDTH_PX * i,0)) # clean up all tmp files files = os.listdir("tmp") for f in files: os.remove("tmp/"+f) # export and display im.save(OUT_PATH) im.show() ## # Python nonsense... # @author jonnyhuck ## if __name__ == "__main__": main(sys.argv[1:])
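# --- Illustrative sketch (separate from the script above) ---
# The arithmetic main() uses to place the gores, shown on its own: with a gore
# width of 30 degrees there are 360 // 30 = 12 gores, centred every 30 degrees
# starting at -165. No mapnik/ogr needed for this part.
GORE_WIDTH_DEG = 30
I = 360 // GORE_WIDTH_DEG
meridians = [-180 + (GORE_WIDTH_DEG // 2) + (GORE_WIDTH_DEG * i) for i in range(I)]
print(meridians)   # [-165, -135, -105, -75, -45, -15, 15, 45, 75, 105, 135, 165]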
Things Always Get Worse | Hujambo! Also, there are no notes for the class; all I have is a ten-week course outline. The final exams take place about five weeks from now. Oh, and she has a job in Nairobi, so she is only free one day a week, a day on which I already have two lectures. She came yesterday and asked me if I would give her some notes for the class so she could read them at home. Haha, what notes? I'll be making those, along with the notes for my other new class, the day before each lecture! Add on my other responsibilities at the college and then my chores at the house: buying food, cooking food, cleaning, washing clothes. When will I ever have time to watch Scrubs? That sounds like all three of us benefit! But then who will buy you coffee? Well, I don't know about you, but I never drink just one cup of coffee… problem solved. Well, don't you just love how Carol comes up with her crazy ideas that leave you going "aaaaaaah!" I say help the student, although you are in a tight spot. I've got a fat 400-page Active Directory book on my desk that I'm learning for work. The test to get certified costs a couple G's… is it just me, or am I always getting shafted trying to learn something?
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class RecordSet(Model): """Describes a DNS record set (a collection of DNS records with the same name and type). :param id: The ID of the record set. :type id: str :param name: The name of the record set. :type name: str :param type: The type of the record set. :type type: str :param etag: The etag of the record set. :type etag: str :param metadata: The metadata attached to the record set. :type metadata: dict :param ttl: The TTL (time-to-live) of the records in the record set. :type ttl: long :param arecords: The list of A records in the record set. :type arecords: list of :class:`ARecord <azure.mgmt.dns.models.ARecord>` :param aaaa_records: The list of AAAA records in the record set. :type aaaa_records: list of :class:`AaaaRecord <azure.mgmt.dns.models.AaaaRecord>` :param mx_records: The list of MX records in the record set. :type mx_records: list of :class:`MxRecord <azure.mgmt.dns.models.MxRecord>` :param ns_records: The list of NS records in the record set. :type ns_records: list of :class:`NsRecord <azure.mgmt.dns.models.NsRecord>` :param ptr_records: The list of PTR records in the record set. :type ptr_records: list of :class:`PtrRecord <azure.mgmt.dns.models.PtrRecord>` :param srv_records: The list of SRV records in the record set. :type srv_records: list of :class:`SrvRecord <azure.mgmt.dns.models.SrvRecord>` :param txt_records: The list of TXT records in the record set. :type txt_records: list of :class:`TxtRecord <azure.mgmt.dns.models.TxtRecord>` :param cname_record: The CNAME record in the record set. :type cname_record: :class:`CnameRecord <azure.mgmt.dns.models.CnameRecord>` :param soa_record: The SOA record in the record set. 
:type soa_record: :class:`SoaRecord <azure.mgmt.dns.models.SoaRecord>` """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'metadata': {'key': 'properties.metadata', 'type': '{str}'}, 'ttl': {'key': 'properties.TTL', 'type': 'long'}, 'arecords': {'key': 'properties.ARecords', 'type': '[ARecord]'}, 'aaaa_records': {'key': 'properties.AAAARecords', 'type': '[AaaaRecord]'}, 'mx_records': {'key': 'properties.MXRecords', 'type': '[MxRecord]'}, 'ns_records': {'key': 'properties.NSRecords', 'type': '[NsRecord]'}, 'ptr_records': {'key': 'properties.PTRRecords', 'type': '[PtrRecord]'}, 'srv_records': {'key': 'properties.SRVRecords', 'type': '[SrvRecord]'}, 'txt_records': {'key': 'properties.TXTRecords', 'type': '[TxtRecord]'}, 'cname_record': {'key': 'properties.CNAMERecord', 'type': 'CnameRecord'}, 'soa_record': {'key': 'properties.SOARecord', 'type': 'SoaRecord'}, } def __init__(self, id=None, name=None, type=None, etag=None, metadata=None, ttl=None, arecords=None, aaaa_records=None, mx_records=None, ns_records=None, ptr_records=None, srv_records=None, txt_records=None, cname_record=None, soa_record=None): self.id = id self.name = name self.type = type self.etag = etag self.metadata = metadata self.ttl = ttl self.arecords = arecords self.aaaa_records = aaaa_records self.mx_records = mx_records self.ns_records = ns_records self.ptr_records = ptr_records self.srv_records = srv_records self.txt_records = txt_records self.cname_record = cname_record self.soa_record = soa_record
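# --- Illustrative sketch (not part of the generated model) ---
# Building a record set in memory for an A record. The ARecord import path
# comes from the docstrings above; the 'ipv4_address' keyword is an assumption
# about that sibling model, and all values are made up.
from azure.mgmt.dns.models import ARecord

example_rs = RecordSet(
    name='www',
    type='Microsoft.Network/dnszones/A',
    ttl=3600,
    arecords=[ARecord(ipv4_address='203.0.113.10')],
)
print(example_rs.arecords[0].ipv4_address)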
The Vaulters are one of Endless Legend's eight playable factions. The true origin of the Vaulters is lost even to themselves. Their history, as they teach it to their children, begins within a great, metal habitat lodged beneath the surface of Auriga. Naturally conservative and wary, the Vaulters have limited relationships with the other peoples of Auriga.
# -*- coding: utf-8 -*- from functools import partial from cfme import web_ui as ui from cfme.web_ui.menu import nav from cfme.exceptions import StorageManagerNotFound from cfme.fixtures import pytest_selenium as sel from cfme.web_ui import Form, InfoBlock, MultiFill, Region, SplitTable, fill, flash from cfme.web_ui import form_buttons, paginator, toolbar from utils.update import Updateable from utils.wait import wait_for list_page = Region(locators=dict( managers_table=SplitTable( header_data=("//div[@id='list_grid']/div[@class='xhdr']/table/tbody", 1), body_data=("//div[@id='list_grid']/div[@class='objbox']/table/tbody", 1), ), )) cfg_btn = partial(toolbar.select, "Configuration") def _get_sm_name(o): if isinstance(o, StorageManager): return o.name else: return str(o) def _find_and_click_sm(context): """Incorporates searching through the page listing and clicking in the table. Also ensures waiting for the transition as there is no ajax hook.""" sm_name = _get_sm_name(context["storage_manager"]) for page in paginator.pages(): if sel.is_displayed("#no_records_div"): break if list_page.managers_table.click_cell("name", sm_name): sel.wait_for_element("#textual_div") # No ajax wait there :( return raise StorageManagerNotFound("Storage manager with name '{}' not found!".format(sm_name)) nav.add_branch( "storage_managers", { "storage_manager_new": lambda _: cfg_btn("Add a New Storage Manager"), "storage_manager": [ _find_and_click_sm, { "storage_manager_edit": lambda _: cfg_btn("Edit this Storage Manager"), } ] } ) class StorageManager(Updateable): """Represents the Storage / Storage Managers object. Allows interaction Args: name: Name of the Storage Namager as it appears in the UI. type: Type of the Storage Manager (eg. StorageManager.NETAPP_RS, ...) hostname: Host name of the machine. ip: IP Address of the machine. port: Port of the machine. credentials: :py:class:`dict` or :py:class:`StorageManager.Credential` """ class Credential(Updateable): def __init__(self, username=None, password=None): self.username = username self.password = password form = Form(fields=[ ("name", ui.Input("name")), ("type", ui.Select("select#sm_type")), ("hostname", ui.Input("hostname")), ("ip", ui.Input("ipaddress")), ("port", ui.Input("port")), ("credentials", Form(fields=[ ("username", ui.Input("userid")), ("password", MultiFill( ui.Input("password"), ui.Input("verify") )) ])), ]) validate = form_buttons.FormButton("Validate the credentials by logging into the Server") add = form_buttons.FormButton("Add this Storage Manager") ## # Types constants. 
Extend if needed :) NETAPP_RS = "NetApp Remote Service" def __init__(self, name=None, type=None, hostname=None, ip=None, port=None, credentials=None): self.name = name self.type = type self.hostname = hostname self.ip = ip self.port = port self.credentials = credentials def create(self, validate=True, cancel=False): sel.force_navigate("storage_manager_new") fill(self.form, self) if validate: sel.click(self.validate) if cancel: sel.click(form_buttons.cancel) else: sel.click(self.add) flash.assert_no_errors() def update(self, updates, validate=True, cancel=False): sel.force_navigate("storage_manager_edit", context={"storage_manager": self}) fill(self.form, updates) if validate: sel.click(self.validate) if cancel: sel.click(form_buttons.cancel) else: sel.click(form_buttons.save) flash.assert_no_errors() def delete(self, cancel=False): self.navigate() cfg_btn("Remove this Storage Manager from the VMDB", invokes_alert=True) sel.handle_alert(cancel) flash.assert_no_errors() def navigate(self): sel.force_navigate("storage_manager", context={"storage_manager": self}) def refresh_inventory(self): self.navigate() cfg_btn("Refresh Inventory", invokes_alert=True) sel.handle_alert(cancel=False) flash.assert_no_errors() def refresh_status(self): self.navigate() cfg_btn("Refresh Status", invokes_alert=True) sel.handle_alert(cancel=False) flash.assert_no_errors() def wait_until_updated(self, num_sec=300): def _wait_func(): self.navigate() return InfoBlock("Properties", "Last Update Status").text.strip().lower() == "ok" wait_for(_wait_func, num_sec=num_sec, delay=5) @property def exists(self): try: self.navigate() return True except StorageManagerNotFound: return False
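# --- Illustrative usage sketch (not part of the module) ---
# How the page model above is typically driven from a test; the concrete
# name/host/credential values are made up.
sm = StorageManager(
    name='netapp-01',
    type=StorageManager.NETAPP_RS,
    hostname='netapp-01.example.com',
    ip='10.0.0.5',
    port='443',
    credentials=StorageManager.Credential(username='admin', password='secret'),
)
sm.create(validate=True)    # navigate to the Add form, fill it and submit
sm.wait_until_updated()     # poll "Last Update Status" until it reads OK
sm.delete()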
Brand Your Event, Generate Sponsor Revenue! Branding your event is a great way to engage your sponsors. Logo Screen is fast and easy to use, so your event is a success and generates more revenue from your sponsors! Our Logo Screen branding screens are super easy to use and reuse, while giving your brand maximum impact. Made standard in 46", 59" and 71" heights, the screens place your full-color logos every 15' to maximize your branding coverage. All Logo Screens have DuraSeam reinforced hems and grommets placed approximately every 2' on 150' rolls for ease of installation and storage. They come in multiple background colors: Red, Black, White, Forest Green, Royal Blue, Orange, Maroon, Carolina Blue, Brown, Tan, Purple, Kelly Green, Gray and Yellow. Our award-winning creative department is highly skilled at taking your vision and using their talent and expertise to take your brand to a whole new level. Let us help turn your vision into reality. Fill out the form now for a free design mock-up to help you visualize the possibilities.
# -*- coding: utf-8 -*-

from setuptools import setup, find_packages

setup(
    name='maxwell-faker',
    version='0.1.0',
    description='Maxwell faker for systems and load testing',
    url='https://github.com/movio/maxwell-faker',
    author='Nicolas Maquet and Nan Wu',
    author_email='[email protected], [email protected]',
    license='MIT',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: Database',
        'Topic :: Software Development :: Testing',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='maxwell faker fake data generator json kafka mysql',
    packages=find_packages(exclude=['tests']),
    install_requires=[
        'PyYAML',
        'kafka-python'
    ],
    entry_points={
        'console_scripts': [
            'maxwell-faker=maxwell_faker:daemon_main',
            'maxwell-faker-bootstrap=maxwell_faker:bootstrap_main',
            'maxwell-faker-gen=maxwell_faker:gen_main',
        ],
    },
)
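# --- Illustrative note (not part of setup.py) ---
# Classifiers must stay comma-separated: without a trailing comma, Python's
# implicit string-literal concatenation silently merges two entries into a
# single invalid classifier, as the small demonstration below shows.
merged = (
    'Topic :: Software Development :: Testing'
    'License :: OSI Approved :: MIT License'
)
assert merged == 'Topic :: Software Development :: TestingLicense :: OSI Approved :: MIT License'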
Get ready for battle! Created by Disguise's elite team of sculpting designers, the 4 Ft. Plastic Long Sword is an example of one of their innovative designs. The prop is crafted of sturdy plastic and features fantastic fantasy detail with flashy gold accents. It is a massive 4 feet in overall length and breaks down into two pieces.
# -*- coding: utf-8 -*- # Created by Gustavo Del Negro <[email protected]> on 9/30/16. from django import forms from django.utils.translation import ugettext_lazy as _ from django.conf import settings from translation_server.models import * from django.urls import reverse class TranslationAdminForm(forms.ModelForm): languages_list = [lang[0].replace('-', '_') for lang in settings.LANGUAGES] translations_url = forms.CharField(max_length=200, widget=forms.HiddenInput()) translation_type_url = forms.CharField(max_length=200, widget=forms.HiddenInput()) last_translation_tag_url = forms.CharField(max_length=200, widget=forms.HiddenInput()) def __init__(self, *args, **kwargs): instance = kwargs.get('instance', None) kwargs.update(initial={ 'translations_url': reverse('api:translations-list'), 'translation_type_url': reverse('api:translations_types-list'), 'last_translation_tag_url': reverse('get_last_translation_tag', args=[0])[:-1], }) super(TranslationAdminForm, self).__init__(*args, **kwargs) def clean(self): cleaned_data = super(TranslationAdminForm, self).clean() for language in self.languages_list: if cleaned_data.get("text_"+language) == cleaned_data.get('auxiliary_text_'+language): self.add_error('auxiliary_text_' + language, forms.ValidationError(_('DTSE1'))) return cleaned_data def save(self, commit=False): translation = super(TranslationAdminForm, self).save(commit=commit) translation.save() translation.migration_created = False translation.save() return translation class Meta: model = Translation fields = "__all__" class TranslationTypeAdminForm(forms.ModelForm): class Meta: model = TranslationType fields = "__all__"
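# --- Illustrative sketch (separate from the admin forms above) ---
# The cross-field check in TranslationAdminForm.clean() follows the standard
# Django pattern: compare cleaned values and attach the error to one field.
# Minimal standalone version (assumes it runs inside a configured Django
# project); the field names and error text are made up.
class PairForm(forms.Form):
    text = forms.CharField()
    auxiliary_text = forms.CharField()

    def clean(self):
        cleaned = super(PairForm, self).clean()
        if cleaned.get('text') == cleaned.get('auxiliary_text'):
            self.add_error('auxiliary_text',
                           forms.ValidationError('values must differ'))
        return cleaned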
San Antonio criminal attorney Dayna L. Jones is experienced in handling drug cases ranging from misdemeanor possession of marijuana to felony drug trafficking offenses. Because drug charges are frequently and aggressively prosecuted in Texas, hiring a criminal defense lawyer to defend and protect your rights is essential. Attorney Dayna L. Jones has experience working with individuals accused of all types of drug crimes. Drug crimes can vary from simple drug possession to large-scale manufacturing and distribution cases. In Texas, there are harsh collateral consequences of a drug conviction, such as having your driver's license suspended for a misdemeanor marijuana conviction and losing financial aid assistance for school. The amount and type of drugs you are charged with will determine whether you are facing misdemeanor or felony charges. For example, the penalties for felony cocaine or heroin charges are far more severe than those for possession of a small amount of marijuana. The penalties you may face depend on several things, such as the type and quantity of drugs seized and whether a weapon was found near the drugs. Because of the concern over illegal drugs crossing our borders from Mexico and elsewhere, San Antonio and surrounding law enforcement agencies aggressively prosecute drug offenses in both federal and state court. There are many constitutional issues that may arise in drug cases, such as Fourth Amendment search and seizure issues. Typically, the most successful defense to drug charges is challenging how the police seized the drugs that they are charging you with. Law enforcement officers must follow the requirements of the Texas Code of Criminal Procedure, the Texas Constitution and the U.S. Constitution. Evidence obtained illegally will be suppressed and cannot be used against you in your criminal case. If you are looking for a criminal attorney to defend you against drug charges, call attorney Dayna L. Jones at (210) 255-8532 so that you get the representation you need immediately. Phones are answered 24 hours a day, 7 days a week. The first consultation is always free.
# -*- coding: utf-8 -*- from django.utils.translation import ugettext_lazy as _ from geo.models import Geoname from geo.api import location from mieli.api import nexus from django import forms import re def __admin2_sanitizer(admin2): admin2.name = admin2.name.split(' ')[-1] return admin2 def build_administrative_2_divisions(admin1_code): admin2_divisions = map(__admin2_sanitizer, location.load_administrative_2_divisions(admin1_code.countrycode, admin1_code.admin1_code)) return [('', '')] + sorted(map(lambda x: (x.geonameid, x.name[0].upper() + x.name[1:]), admin2_divisions), key=lambda x: x[1]) def build_places(admin1_code): places = location.load_places(admin1_code.countrycode, admin1_code.admin1_code) return [('', '')] + sorted(map(lambda x: (x.geonameid, x.name[0].upper() + x.name[1:]), places), key=lambda x: x[1]) def set_extra_fields(**kwargs): form = kwargs['form'] form.initial['administrative_division'] = '' form.initial['place'] = '' fields = form.fields catalonia = filter(lambda x: x.name == 'Catalonia', location.load_administrative_divisions('ES'))[0] fields['administrative_division'] = forms.ChoiceField(label=_('Prov&iacute;ncia'), choices=build_administrative_2_divisions(catalonia)) fields['place'] = forms.ChoiceField(label=_('Municipi'), choices=build_places(catalonia)) return kwargs def clean_extra_fields(form, **kwargs): if not 'administrative_division' in form.cleaned_data: form.add_error('administrative_division', _('Indica una prov&iacute;ncia')) return if not 'place' in form.cleaned_data: form.add_error('place', _('Indica un municipi')) return if form.cleaned_data['administrative_division'] == '': form.add_error('administrative_division', _('Indica una prov&iacute;ncia')) return if form.cleaned_data['place'] == '': form.add_error('place', _('Indica un municipi')) return def on_user_creation(user, **kwargs): if 'location' in kwargs: if kwargs['location'] == None: if 'place' in kwargs: if kwargs['place'] == '': raise Exception('Place missing') kwargs['location'] = kwargs['place'] else: return else: if not 'place' in kwargs: return kwargs['location'] = kwargs['place'] place = Geoname.objects.get(geonameid=kwargs['location']) administrative_division_id = None if 'administrative_division' in kwargs: administrative_division_id = kwargs['administrative_division'] l = location.save(user, place, administrative_division_id) #nexus_ = nexus.get(name=l.admin2.name.split(' ')[-1]) #if nexus_ == None: # raise Exception("Nexus not found for '%s'" % l.admin2.name) #nexus_.join(user)
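# --- Illustrative sketch (not part of the module) ---
# The choice lists above are built by mapping geoname-like objects to
# (id, Capitalised name) pairs and sorting them by name; the stand-in class
# below exists only for illustration.
class _FakePlace(object):
    def __init__(self, geonameid, name):
        self.geonameid, self.name = geonameid, name

_places = [_FakePlace(1, 'girona'), _FakePlace(2, 'barcelona')]
_choices = [('', '')] + sorted(
    map(lambda x: (x.geonameid, x.name[0].upper() + x.name[1:]), _places),
    key=lambda x: x[1])
# -> [('', ''), (2, 'Barcelona'), (1, 'Girona')]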
One of the first lovely roads that we would hit was the 403, just north of Chuncheon. The small road wound its way through farmland and along a stream, and there was very little traffic. The joyful realization that we were hitting the road was fully recognized, and Gravy and I were in high spirits. But the 403 began to rise, and directly ahead of us were mountains. We realized that there would be no circumnavigation of these mountains, as the grade began to steepen and the switchbacks began. Before long, I was off the bicycle, limping and pushing my beastly cargo up a hill. Gravy, who was significantly lighter packed than myself, was still pedaling up the monster, but that too would soon stop. At one point, about 30 minutes in, the road sign indicated...; no, boasted, that the grade was now 13%. After seeing this, I pushed my heap past a snake, dead on the road, having died presumably from trying to climb this wretched mountain. Then there was a sign saying that the top was 2 km away. Now, 2 km usually isn't much. But when every step you are taking is up a 13% grade, and you are carrying, among other things, at least 1000 pages of written word in your bulging panniers, then each meter is a monumental feat. Now, multiply that by 2000 and you get the idea. Everything that goes up must come down, and with cycling this is a very, very, good thing. Usually every climb is rewarded with a generous recompense of downhill riding, which usually triggers immediate amnesia to the struggle that came just before. But this particular hill was so nefariously steep that going down was almost as hard as going up. We had to ride the brakes hard, until the muscles in my hands were screaming. To add to this, they were also profusely sweating. In the event that my hand slipped off the brake, it may just be sufficient enough time for the gravity of the 13% grade to propel myself and my preposterously loaded bicycle right off the mountainside. After making it down safely, we rode into the city of Hwacheon, and had lunch by the river- a veritable feast of string cheese, beef jerky, corn on the cob, and lemon ginger cookies. I figured the more I ate, the more weight I would lose in the awesome parallel universe of cycle touring. After lunch, we picked up a pair of expensive cheap sunglasses (15,000 Won) for Gravy, and hit the road, the summer sun beating down fierce in the early afternoon. We were cruising along this path along the Bukhan-gang (North Han River) that went for miles and miles, and the heat forced us off the bicycles and into the water in the first of what would become the daily swimming ritual. The swim was like a baptism, a sloughing off of sweat and toil and entering into a new pact, a new covenant, with creation. All of the day's difficulties up to this point only make the water that much cooler and that much sweeter, and the experience that much more profound. And that is the crux of the bicycle trip. It's a microcosm of life. Moments of misery and suffering juxtaposed by the sweetest redemption of rising beauty and playful joy. It's a real live reminder of God's promise of redemption- to take the worst of life and to make it good, no, great! Better than we could possibly fathom. Beauty is only there after the testing fires of life. On the bicycle, this happens multiple times throughout one day. In one moment you may be struggling up a hill, rivulets of sweat stinging your eyes, wind in your face, cars screaming by, shotgunning you with exhaust. 
The next moment you're flying down a mountain, taking in layer upon rich layer of green mountains adorning billowing clouds like a deep jungle forest. The sweat is dried by a refreshing wind, and even cars fall behind as the country side welcomes you to its bosom, whispering its ancient melodies into your ears. And later, when trying to convey this with Gravy, we realize that no words are needed, the communication becomes extremely high context, written in the crinkles of smiles and satisfied sighs- the bliss of just being out there and on bicycles!
#!/usr/bin/python # -*- coding: utf-8 -*- # # --- BEGIN_HEADER --- # # edituser - Edit a MiG user # Copyright (C) 2003-2013 The MiG Project lead by Brian Vinter # # This file is part of MiG. # # MiG is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # MiG is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # -- END_HEADER --- # """Edit MiG user in user database and file system""" import getopt import os import sys from shared.useradm import init_user_adm, edit_user def usage(name='edituser.py'): """Usage help""" print """Edit existing user in MiG user database and file system. Usage: %(name)s [OPTIONS] -i USER_ID [FULL_NAME] [ORGANIZATION] [STATE] [COUNTRY] \ [EMAIL] [COMMENT] [PASSWORD] Where OPTIONS may be one or more of: -c CONF_FILE Use CONF_FILE as server configuration -d DB_FILE Use DB_FILE as user data base file -f Force operations to continue past errors -h Show this help -i CERT_DN CERT_DN of user to edit -v Verbose output """\ % {'name': name} # ## Main ### if '__main__' == __name__: (args, app_dir, db_path) = init_user_adm() conf_path = None force = False verbose = False user_id = None user_dict = {} opt_args = 'c:d:fhi:v' try: (opts, args) = getopt.getopt(args, opt_args) except getopt.GetoptError, err: print 'Error: ', err.msg usage() sys.exit(1) for (opt, val) in opts: if opt == '-c': conf_path = val elif opt == '-d': db_path = val elif opt == '-f': force = True elif opt == '-h': usage() sys.exit(0) elif opt == '-i': user_id = val elif opt == '-v': verbose = True else: print 'Error: %s not supported!' % opt if conf_path and not os.path.isfile(conf_path): print 'Failed to read configuration file: %s' % conf_path sys.exit(1) if verbose: if conf_path: print 'using configuration in %s' % conf_path else: print 'using configuration from MIG_CONF (or default)' if not user_id: print 'Error: Existing user ID is required' usage() sys.exit(1) if args: try: user_dict['full_name'] = args[0] user_dict['organization'] = args[1] user_dict['state'] = args[2] user_dict['country'] = args[3] user_dict['email'] = args[4] except IndexError: # Ignore missing optional arguments pass else: print 'Please enter the new details for %s:' % user_id print '[enter to skip field]' user_dict['full_name'] = raw_input('Full Name: ').title() user_dict['organization'] = raw_input('Organization: ') user_dict['state'] = raw_input('State: ') user_dict['country'] = raw_input('2-letter Country Code: ') user_dict['email'] = raw_input('Email: ') # Remove empty value fields for (key, val) in user_dict.items(): if not val: del user_dict[key] if verbose: print 'Update DB entry and dirs for %s: %s' % (user_id, user_dict) try: user = edit_user(user_id, user_dict, conf_path, db_path, force, verbose) except Exception, err: print err sys.exit(1) print '%s\nchanged to\n%s\nin user database and file system' % \ (user_id, user['distinguished_name']) print print 'Please revoke/reissue any related certificates!'
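# --- Illustrative usage (not part of the script) ---
# A made-up, non-interactive invocation; the positional arguments map, in
# order, to full_name, organization, state, country and email, and omitted
# trailing values are simply skipped:
#
#   python edituser.py -v -i "EXISTING_CERT_DN" \
#       "Jane A. Doe" "Example University" "NA" "DK" "jane@example.org"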
Chaky was founded in 2009. The company is privately owned and is run by its founders. As a company, our mission is to import the very highest quality fish and plants. Chaky is a major importer of tropical fish and aquarium plants in Cyprus, specializing exclusively in the wholesale market. Prices are available only on request: click here and fill in the form and we will send you a catalogue along with the prices. On our website you can find the many tropical fish available, along with their detailed information.
#!/usr/bin/env python # encoding: utf-8 # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import mock from ec2stack.helpers import read_file, generate_signature from . import Ec2StackAppTestCase class PasswordTestCase(Ec2StackAppTestCase): def test_get_password_data(self): data = self.get_example_data() data['Action'] = 'GetPasswordData' data['InstanceId'] = 'Test' data['Signature'] = generate_signature(data, 'POST', 'localhost', '/') get = mock.Mock() get.return_value.text = read_file( 'tests/data/valid_instance_get_password.json' ) get.return_value.status_code = 200 with mock.patch('requests.get', get): response = self.post( '/', data=data ) self.assert_ok(response) assert 'GetPasswordDataResponse' in response.data def test_invalid_get_password(self): data = self.get_example_data() data['Action'] = 'GetPasswordData' data['InstanceId'] = 'Test' data['Signature'] = generate_signature(data, 'POST', 'localhost', '/') get = mock.Mock() get.return_value.text = read_file( 'tests/data/invalid_instance_get_password.json' ) get.return_value.status_code = 431 with mock.patch('requests.get', get): response = self.post( '/', data=data ) self.assert_bad_request(response) assert 'InvalidInstanceId.NotFound' in response.data
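# --- Illustrative sketch (not part of the test case) ---
# The patching pattern used above, in isolation: a Mock whose return_value
# mimics a requests Response, swapped in for requests.get only inside the
# with-block. The URL and JSON body are made up.
import requests

fake_get = mock.Mock()
fake_get.return_value.status_code = 200
fake_get.return_value.text = '{"listvirtualmachinesresponse": {}}'

with mock.patch('requests.get', fake_get):
    resp = requests.get('http://localhost:8080/client/api')
    assert resp.status_code == 200
fake_get.assert_called_once_with('http://localhost:8080/client/api')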
Hip and knee arthritis are major issues with OLLI Members. Dr. So will review the risk factors and causes that contribute to hip and knee arthritis, examine the importance of proper diagnosis and care, and identify preventable actions and appropriate treatment plans. Assistant Clinical Professor, Department of Orthopedic Surgery, UCI Health.
# -*- coding: utf-8 -*- import langid import logging import pymongo import os import re import sys import twitter import urllib from datetime import datetime from datetime import timedelta from random import choice from random import randint logging.basicConfig() logger = logging.getLogger(__name__) class OrtograBot(object): """ OrtograBot searches for certain orthographic errors on twitter and reports back to the user with the proper form. """ def __init__(self, mongodb_url=None): """Setup MongoDB databse, Twitter API and rules""" mongodb_url = os.environ.get("MONGOHQ_URL", mongodb_url) self.debug = bool(os.environ.get("DEBUG", True)) client = pymongo.MongoClient(mongodb_url) self.db = client[mongodb_url.rsplit("/", 1)[1]] credentials = self.db.twitterCredentials.find_one() self.username = credentials["username"] self.api = twitter.Api( consumer_key=credentials["consumer_key"], consumer_secret=credentials["consumer_secret"], access_token_key=credentials["access_token_key"], access_token_secret=credentials["access_token_secret"] ) self.rules = [ { "search": u"tí", "message": u"ti nunca lleva tilde → " u"http://buscon.rae.es/dpd/?key=ti&origen=REDPD", "lang": u"es", }, { "search": u"cuidate", "message": u"cuídate es esdrújula, " u"por lo que siempre lleva tilde → " u"http://buscon.rae.es/dpd/?key=tilde#113", "lang": u"es", }, { "search": u"corazon", "message": u"corazón es aguda acabada en -n, " u"por lo que siempre lleva tilde → " u"http://buscon.rae.es/dpd/?key=tilde#111", "lang": u"es", }, { "search": u"bicep", "message": u"la palabra «bicep» no existe, " u"es bíceps, llana y con tilde por acabar en -s " u"precedida de consonante → " u"http://lema.rae.es/dpd/?key=b%C3%ADceps", "lang": u"es", }, { "search": u"biceps", "message": u"bíceps es llana y acabada en -s " u"precedida de consonante, " u"por lo que siempre lleva tilde → " u"http://lema.rae.es/dpd/?key=b%C3%ADceps", "lang": u"es", } ] self.punctuation = re.compile(r"[ \.,\?\!¡¿\n\t\-]+") self.emojis = [ u"🐭", u"🐮", u"🐱", u"🐵", u"😁", u"😂", u"😃", u"😄", u"😅", u"😆", u"😇", u"😈", u"😉", u"😊", u"😋", u"😌", u"😍", u"😎", u"😏", u"😰", u"😱", u"😲", u"😳", u""] def run_rule(self): """Run one random rule and reply to the twitter user if needed""" rule = choice(self.rules) # HACK: Using quote_plus and encode to fix a bug in python-twitter # search function search = urllib.quote_plus(rule["search"].encode("utf-8")) results = self.api.GetSearch(search) for status_obj in results: text_lower = status_obj.text.lower() if (rule["search"] not in self.punctuation.split(text_lower) or self.username.lower() in text_lower or status_obj.in_reply_to_status_id or status_obj.retweeted or langid.classify(status_obj.text)[0] != rule["lang"]): continue # To guarantee some human-like behaviour, # it only replies 25% of the time if randint(1, 100) > 75: # The 75% remaining, just tweet random messages if not self.debug: try: if randint(1, 100) > 85: # 85% from the message of the rule message = u"Recuerda: {} {}".format( rule["message"], choice(self.emojis) ) # Add a random emoji icon to messages to avoid # duplicated statuses self.api.PostUpdate(message) else: # 15% a friendly message message = (u"Soy ortolibán, " u"tu corrector ortográfico " u"amigo {}".format(choice(self.emojis))) self.api.PostUpdate(message) except Exception: logger.error("Unexpected error: %s", sys.exc_info()[0:2]) continue post_time = datetime.strptime(status_obj.created_at, '%a %b %d %H:%M:%S +0000 %Y') now = datetime.utcnow() one_day_ago = now - timedelta(days=1) reply_to = { "status_id": 
status_obj.id, "screen_name": status_obj.user.screen_name, "post_time": post_time, "text": status_obj.text, "reply_time": now, "search": rule["search"], "lang": rule["lang"], "place": status_obj.place, "coordinates": status_obj.coordinates, } user_already_messaged = self.db.messaged.find_one({ "screen_name": reply_to["screen_name"], "search": rule["search"], "lang": rule["lang"], "reply_time": {"$gte": one_day_ago} }) if not user_already_messaged: try: reply_message = u"@{} {}".format(reply_to["screen_name"], rule["message"]) if not self.debug: self.api.PostUpdate( reply_message, in_reply_to_status_id=status_obj.id ) self.db.messaged.insert(reply_to, safe=True) # We only reply to one user break except Exception: logger.error("Unexpected error: %s", sys.exc_info()[0:2])
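A minimal usage sketch for the bot defined above follows. The module path and MongoDB URL are assumptions (the original entry point is not shown here); the database behind the URL is expected to already contain the twitterCredentials document that the constructor reads.

# Hedged usage sketch; 'ortograbot' as a module name and the MongoDB URL
# are placeholders, and valid Twitter credentials must already be stored
# in the twitterCredentials collection.
from ortograbot import OrtograBot

bot = OrtograBot(mongodb_url="mongodb://localhost:27017/ortograbot")
bot.run_rule()  # pick one random rule, search Twitter, reply to at most one user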
Consider us to get best-in-class Three Tier Orbital Shakers at affordable prices. To keep them in tune with industry standards, these Three Tier Orbital Shakers are made using certified raw materials and pioneering technology. As a reputed manufacturer and supplier, we deliver these Three Tier Orbital Shakers on time and to each client's required specifications.
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Adam Števko <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: ipadm_prop short_description: Manage protocol properties on Solaris/illumos systems. description: - Modify protocol properties on Solaris/illumos systems. version_added: "2.2" author: Adam Števko (@xen0l) options: protocol: description: - Specifies the procotol for which we want to manage properties. required: true property: description: - Specifies the name of property we want to manage. required: true value: description: - Specifies the value we want to set for the property. required: false temporary: description: - Specifies that the property value is temporary. Temporary property values do not persist across reboots. required: false default: false choices: [ "true", "false" ] state: description: - Set or reset the property value. required: false default: present choices: [ "present", "absent", "reset" ] ''' EXAMPLES = ''' # Set TCP receive buffer size ipadm_prop: protocol=tcp property=recv_buf value=65536 # Reset UDP send buffer size to the default value ipadm_prop: protocol=udp property=send_buf state=reset ''' RETURN = ''' protocol: description: property's protocol returned: always type: string sample: "TCP" property: description: name of the property returned: always type: string sample: "recv_maxbuf" state: description: state of the target returned: always type: string sample: "present" temporary: description: property's persistence returned: always type: boolean sample: "True" value: description: value of the property. May be int or string depending on property. 
returned: always type: int sample: "'1024' or 'never'" ''' SUPPORTED_PROTOCOLS = ['ipv4', 'ipv6', 'icmp', 'tcp', 'udp', 'sctp'] class Prop(object): def __init__(self, module): self.module = module self.protocol = module.params['protocol'] self.property = module.params['property'] self.value = module.params['value'] self.temporary = module.params['temporary'] self.state = module.params['state'] def property_exists(self): cmd = [self.module.get_bin_path('ipadm')] cmd.append('show-prop') cmd.append('-p') cmd.append(self.property) cmd.append(self.protocol) (rc, _, _) = self.module.run_command(cmd) if rc == 0: return True else: self.module.fail_json(msg='Unknown property "%s" for protocol %s' % (self.property, self.protocol), protocol=self.protocol, property=self.property) def property_is_modified(self): cmd = [self.module.get_bin_path('ipadm')] cmd.append('show-prop') cmd.append('-c') cmd.append('-o') cmd.append('current,default') cmd.append('-p') cmd.append(self.property) cmd.append(self.protocol) (rc, out, _) = self.module.run_command(cmd) out = out.rstrip() (value, default) = out.split(':') if rc == 0 and value == default: return True else: return False def property_is_set(self): cmd = [self.module.get_bin_path('ipadm')] cmd.append('show-prop') cmd.append('-c') cmd.append('-o') cmd.append('current') cmd.append('-p') cmd.append(self.property) cmd.append(self.protocol) (rc, out, _) = self.module.run_command(cmd) out = out.rstrip() if rc == 0 and self.value == out: return True else: return False def set_property(self): cmd = [self.module.get_bin_path('ipadm')] cmd.append('set-prop') if self.temporary: cmd.append('-t') cmd.append('-p') cmd.append(self.property + "=" + self.value) cmd.append(self.protocol) return self.module.run_command(cmd) def reset_property(self): cmd = [self.module.get_bin_path('ipadm')] cmd.append('reset-prop') if self.temporary: cmd.append('-t') cmd.append('-p') cmd.append(self.property) cmd.append(self.protocol) return self.module.run_command(cmd) def main(): module = AnsibleModule( argument_spec=dict( protocol=dict(required=True, choices=SUPPORTED_PROTOCOLS), property=dict(required=True), value=dict(required=False), temporary=dict(default=False, type='bool'), state=dict( default='present', choices=['absent', 'present', 'reset']), ), supports_check_mode=True ) prop = Prop(module) rc = None out = '' err = '' result = {} result['protocol'] = prop.protocol result['property'] = prop.property result['state'] = prop.state result['temporary'] = prop.temporary if prop.value: result['value'] = prop.value if prop.state == 'absent' or prop.state == 'reset': if prop.property_exists(): if not prop.property_is_modified(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = prop.reset_property() if rc != 0: module.fail_json(protocol=prop.protocol, property=prop.property, msg=err, rc=rc) elif prop.state == 'present': if prop.value is None: module.fail_json(msg='Value is mandatory with state "present"') if prop.property_exists(): if not prop.property_is_set(): if module.check_mode: module.exit_json(changed=True) (rc, out, err) = prop.set_property() if rc != 0: module.fail_json(protocol=prop.protocol, property=prop.property, msg=err, rc=rc) if rc is None: result['changed'] = False else: result['changed'] = True if out: result['stdout'] = out if err: result['stderr'] = err module.exit_json(**result) from ansible.module_utils.basic import * if __name__ == '__main__': main()
Challenges for the Cantata project included eliminating unimportant messages from the directional signage. The new signage replaced the old sign system, which carried three times as many messages and diluted crucial information. The signage on this project was custom fabricated: designed by Grafx Communications Group, manufactured by Image First Sign and installed by ASI Chicago. Most of the sign types are aluminum cabinets using standard extrusions and in-board aluminum posts with decorative caps. Some of the primary sign types feature an above-grade concrete base with chamfered edges. The banner panels feature colorful digitally printed vinyl graphic overlays.
""" Eigenvalue spectrum of graphs. """ # Copyright (C) 2004-2013 by # Aric Hagberg <[email protected]> # Dan Schult <[email protected]> # Pieter Swart <[email protected]> # All rights reserved. # BSD license. import networkx as nx __author__ = "\n".join(['Aric Hagberg <[email protected]>', 'Pieter Swart ([email protected])', 'Dan Schult([email protected])']) __all__ = ['laplacian_spectrum', 'adjacency_spectrum'] def laplacian_spectrum(G, weight='weight'): """Return eigenvalues of the Laplacian of G Parameters ---------- G : graph A NetworkX graph weight : string or None, optional (default='weight') The edge data key used to compute each value in the matrix. If None, then each edge has weight 1. Returns ------- evals : NumPy array Eigenvalues Notes ----- For MultiGraph/MultiDiGraph, the edges weights are summed. See to_numpy_matrix for other options. See Also -------- laplacian_matrix """ from scipy.linalg import eigvalsh return eigvalsh(nx.laplacian_matrix(G,weight=weight).todense()) def adjacency_spectrum(G, weight='weight'): """Return eigenvalues of the adjacency matrix of G. Parameters ---------- G : graph A NetworkX graph weight : string or None, optional (default='weight') The edge data key used to compute each value in the matrix. If None, then each edge has weight 1. Returns ------- evals : NumPy array Eigenvalues Notes ----- For MultiGraph/MultiDiGraph, the edges weights are summed. See to_numpy_matrix for other options. See Also -------- adjacency_matrix """ from scipy.linalg import eigvals return eigvals(nx.adjacency_matrix(G,weight=weight).todense()) # fixture for nose tests def setup_module(module): from nose import SkipTest try: import scipy.linalg except: raise SkipTest("scipy.linalg not available")
Firstly, I am not aware of much discussion on the topic. But ClF3 is certainly well known, and so it is trivial to suggest BrBr3, i.e. Br4, as an example of a halogen allotrope. SciFinder, for example, gives no literature hits on such a substance (either real or as a calculation; it is not always easy nowadays to tell which). So, is it stable? A B3LYP+D3/6-311++G(2d,2p) calculation reveals a free energy barrier of 17.2 kcal/mol preventing Br4 from dissociating to 2Br2. The reaction however is rather exoenergic, and so to stand any chance of observing Br4, one would probably have to create it at a low temperature; say -78 °C would probably be low enough to give it a long lifetime, perhaps even 0 °C. So how to make it? This is pure speculation, but the red colour of bromine originates from (weak, symmetry-forbidden) transitions, with wavelengths calculated (for the 2Br2 complex) as 504 and 492 nm. Geometry optimisation of the first singlet excited state of 2Br2 produces the structure below, not that different from Br4. At least from these relatively simple calculations, it does seem as if an allotrope of bromine might be detectable spectroscopically, if not actually isolated as a pure substance.

Actually, valence shell expansion does NOT occur in these species. For discussion of this point, see http://www.ch.imperial.ac.uk/rzepa/blog/?p=2687. There, I discuss another allotrope, this time of iodine, I8, or I.I7, and how it is not hypervalent. Hypercoordinate, yes. In fact this allotrope of iodine is not stable, since it extrudes I2 with no activation barrier. But I4 might be.

Interesting suggestion, but note that Br is a heavy element, and to be sure of the reliability of ab initio calculations, relativistic quantum chemical methods (DHF, for instance) must also be employed. In such heavy elements it is quite probable that the effects of relativity may change even the qualitative picture emerging from non-relativistic calculations.

Relativistic effects in Br are normally modest (for properties such as energies and structures, though of course not for magnetic shieldings), but of course this is not a state-of-the-art calculation and it would be good to know how modest they are.

I think a transient species most certainly can be useful. Singlet oxygen is a transient "allotrope" of oxygen, but nonetheless useful as an oxidant, as also is ozone, another allotrope. Br4 is potentially a masked Br(+).Br3(-) and so, like singlet oxygen, might be a useful source of activated Br(+).

NRT suggests that it is rather Br(-).Br3(+) than Br(+).Br3(-).

Can you expand NRT? I presume it is not the gas law nRT.

I used a Natural Resonance Theory (NRT) analysis, which is part of natural bond orbital (NBO) theory, to get the resonance structure weights by fitting the electron density of Br4 available from the B3LYP/6-311++G(2d,2p) calculations. The Br4 structure had D3h symmetry with a Br-Br distance of 2.5201 Å. The NRT analysis shows three dominant resonance structures Br(-).Br3(+) with identical weights, 16.1*3 = 48.3%, three apparently long-bonded resonance structures (Br-Br).(Br—Br) with weights of 8.1*3 = 24.3%, and one hypervalent structure BrBr(Br)Br, 7.9%.
The natural population analysis charges and Mulliken charges predict a partial negative charge on the terminal bromine atoms of -0.13 and -0.17 a.u., respectively. The Br(-).Br3(+) structure is also consistent with the fact that halogen trihalides have a less electronegative central atom.

It seems that Br4 might be detectable by microwave spectroscopy, if the apparatus could withstand the corrosive power of Br2.

I have just done a search of the CSD. 155 hits are obtained for the anion Br3(-), but none for the cation Br3(+). So on this basis alone, there is little evidence for Br3(+), or for that matter Br(+) on its own.

Re Peter's suggestion of I4, and mindful that with this element relativistic effects might be important, here is the calculated IRC (B3LYP/6-311++G(2d,2p)), with a free energy barrier of ~13 kcal/mol. This is rather less than with Br4. Certainly worth checking what effect relativistic corrections would have on this value.

Re relativistic corrections for, e.g., I4: calculating the energies (no geometry re-optimisations) with inclusion of the Douglas-Kroll-Hess 2nd-order scalar relativistic correction to the two-electron integrals gives an activation energy of 15.8 kcal/mol (not free energy). Although a crude estimate of the effect of relativistic corrections, this result does suggest that they are not going to be massive! The basis set used for iodine was an all-electron basis, as appropriate for making such a correction.

Allotropic halogens is licensed by Henry Rzepa under a Creative Commons Attribution-Noncommercial-Share Alike 3.0 United States License.
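For readers who want to reproduce a calculation at roughly this level of theory, a minimal sketch using Psi4 is given below. This is an assumption on my part: the posts do not say which program was used, the starting geometry is only a D3h guess built from the quoted 2.52 Å Br-Br distance, and the Pople basis may not be available for Br in every Psi4 build (a comparable triple-zeta basis such as def2-TZVPP could be substituted).

# Hedged sketch of a B3LYP-D3 optimisation of a D3h Br4 guess structure in Psi4.
# Method, basis and geometry are assumptions for illustration, not the original input.
import psi4

psi4.set_memory("2 GB")
br4 = psi4.geometry("""
0 1
Br  0.000  0.000  0.000
Br  2.520  0.000  0.000
Br -1.260  2.182  0.000
Br -1.260 -2.182  0.000
""")

psi4.set_options({"basis": "6-311++G(2d,2p)"})   # swap for def2-TZVPP if unavailable for Br
energy = psi4.optimize("b3lyp-d3")               # D3 dispersion requires the DFTD3 add-on
print(energy)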
#!/usr/bin/env python ######################################################################################### # # Test function sct_propseg # # --------------------------------------------------------------------------------------- # Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca> # Author: Augustin Roux # modified: 2014/10/09 # # About the license: see the file LICENSE.TXT ######################################################################################### #import sct_utils as sct import commands import shutil import getopt import sys import time import sct_utils as sct import os import nibabel import numpy as np import math from tabulate import tabulate class param: def __init__(self): self.download = 0 self.remove_tmp_file = 0 self.verbose = 1 self.url_git = 'https://github.com/benjamindeleener/PropSeg_data.git' self.path_data = '/home/django/benjamindeleener/data/PropSeg_data/' param.data = ['t1','t2','dmri'] def main(): # Check input parameters try: opts, args = getopt.getopt(sys.argv[1:],'h:d:p:r:t:') except getopt.GetoptError: usage() for opt, arg in opts: if opt == '-h': usage() sys.exit(0) if opt == '-d': param.download = int(arg) if opt == '-p': param.path_data = arg if opt == '-t': if ',' in arg: param.data = arg.split(',') else: param.data = arg if opt == '-r': param.remove_tmp_file = int(arg) print param.data start_time = time.time() # download data if param.download: sct.printv('\nDownloading testing data...', param.verbose) # remove data folder if exist if os.path.exists('PropSeg_data'): sct.printv('WARNING: PropSeg_data already exists. Removing it...', param.verbose, 'warning') sct.run('rm -rf PropSeg_data') # clone git repos sct.run('git clone '+param.url_git) # update path_data field param.path_data = 'PropSeg_data' # get absolute path and add slash at the end param.path_data = sct.slash_at_the_end(os.path.abspath(param.path_data), 1) # segment all data in t1 folder results_t1 = [] sum_old,sum_new = 0,0 if 't1' in param.data: for dirname in os.listdir(param.path_data+"t1/"): if dirname not in ['._.DS_Store','.DS_Store']: for filename in os.listdir(param.path_data+"t1/"+dirname): if filename.startswith('t1') and not filename.endswith('_seg.nii.gz') and not filename.endswith('_detection.nii.gz') and not filename.endswith('.vtk'): print dirname, filename [d_old,d_new],[r_old,r_new] = segmentation(param.path_data+"t1/"+dirname+"/"+filename,param.path_data+"t1/"+dirname+"/",'t1') if d_old == 0: d_old = 'OK' sum_old = sum_old+1 else: d_old = 'Not In' if d_new == 0: d_new = 'OK' sum_new = sum_new+1 else: d_new = 'Not In' results_t1.append([dirname,d_old,d_new,round(r_old,2),round(r_new,2)]) # compute average results_t1.append(['average',sum_old,sum_new,np.mean([line[3] for line in results_t1]),np.mean([line[4] for line in results_t1])]) # segment all data in t2 folder results_t2 = [] sum_old,sum_new = 0,0 if 't2' in param.data: for dirname in os.listdir(param.path_data+"t2/"): if dirname not in ['._.DS_Store','.DS_Store']: for filename in os.listdir(param.path_data+"t2/"+dirname): if filename.startswith('t2_') and not filename.endswith('_seg.nii.gz') and not filename.endswith('_detection.nii.gz') and not filename.endswith('.vtk'): print dirname, filename [d_old,d_new],[r_old,r_new] = segmentation(param.path_data+"t2/"+dirname+"/"+filename,param.path_data+"t2/"+dirname+"/",'t2') if d_old == 0: d_old = 'OK' sum_old = sum_old+1 else: d_old = 'Not In' if d_new == 0: d_new = 'OK' sum_new = sum_new+1 else: d_new = 'Not In' 
results_t2.append([dirname,d_old,d_new,round(r_old,2),round(r_new,2)]) # compute average results_t2.append(['average',sum_old,sum_new,np.mean([line[3] for line in results_t2]),np.mean([line[4] for line in results_t2])]) results_dmri = [] sum_old,sum_new = 0,0 if 'dmri' in param.data: for dirname in os.listdir(param.path_data+"dmri/"): if dirname not in ['._.DS_Store','.DS_Store']: for filename in os.listdir(param.path_data+"dmri/"+dirname): if filename.startswith('dmri') and not filename.endswith('_seg.nii.gz') and not filename.endswith('_detection.nii.gz') and not filename.endswith('.vtk'): print dirname, filename [d_old,d_new],[r_old,r_new] = segmentation(param.path_data+"dmri/"+dirname+"/"+filename,param.path_data+"dmri/"+dirname+"/",'t1') if d_old == 0: d_old = 'OK' sum_old = sum_old+1 else: d_old = 'Not In' if d_new == 0: d_new = 'OK' sum_new = sum_new+1 else: d_new = 'Not In' results_dmri.append([dirname,d_old,d_new,round(r_old,2),round(r_new,2)]) # compute average results_dmri.append(['average',sum_old,sum_new,np.mean([line[3] for line in results_dmri]),np.mean([line[4] for line in results_dmri])]) if 't1' in param.data: print '' print tabulate(results_t1, headers=["Subject-T1","Detect-old","Detect-new","DC-old", "DC-new"], floatfmt=".2f") if 't2' in param.data: print '' print tabulate(results_t2, headers=["Subject-T2","Detect-old","Detect-new","DC-old", "DC-new"], floatfmt=".2f") if 'dmri' in param.data: print '' print tabulate(results_dmri, headers=["Subject-dmri","Detect-old","Detect-new","DC-old", "DC-new"], floatfmt=".2f") # display elapsed time elapsed_time = time.time() - start_time print 'Finished! Elapsed time: '+str(int(round(elapsed_time)))+'s\n' # remove temp files if param.remove_tmp_file: sct.printv('\nRemove temporary files...', param.verbose) sct.run('rm -rf '+param.path_tmp, param.verbose) e = 0 for i in range(0,len(results_t2)): if (results_t2[i][4] < 0.8 or results_t2[i][4] < results_t2[i][3]): e = e+1 sys.exit(e) def segmentation(fname_input, output_dir, image_type): # parameters path_in, file_in, ext_in = sct.extract_fname(fname_input) segmentation_filename_old = path_in + 'old/' + file_in + '_seg' + ext_in manual_segmentation_filename_old = path_in + 'manual_' + file_in + ext_in detection_filename_old = path_in + 'old/' + file_in + '_detection' + ext_in segmentation_filename_new = path_in + 'new/' + file_in + '_seg' + ext_in manual_segmentation_filename_new = path_in + 'manual_' + file_in + ext_in detection_filename_new = path_in + 'new/' + file_in + '_detection' + ext_in # initialize results of segmentation and detection results_detection = [0,0] results_segmentation = [0.0,0.0] # perform PropSeg old version sct.run('rm -rf '+output_dir+'old') sct.create_folder(output_dir+'old') cmd = 'sct_propseg_old -i ' + fname_input \ + ' -o ' + output_dir+'old' \ + ' -t ' + image_type \ + ' -detect-nii' sct.printv(cmd) status_propseg_old, output_propseg_old = commands.getstatusoutput(cmd) sct.printv(output_propseg_old) # check if spinal cord is correctly detected with old version of PropSeg cmd = "isct_check_detection.py -i "+detection_filename_old+" -t "+manual_segmentation_filename_old sct.printv(cmd) status_detection_old, output_detection_old = commands.getstatusoutput(cmd) sct.printv(output_detection_old) results_detection[0] = status_detection_old # compute Dice coefficient for old version of PropSeg cmd_validation = 'sct_dice_coefficient '+segmentation_filename_old \ + ' '+manual_segmentation_filename_old \ + ' -bzmax' sct.printv(cmd_validation) 
status_validation_old, output_validation_old = commands.getstatusoutput(cmd_validation) print output_validation_old res = output_validation_old.split()[-1] if res != 'nan': results_segmentation[0] = float(res) else: results_segmentation[0] = 0.0 # perform PropSeg new version sct.run('rm -rf '+output_dir+'new') sct.create_folder(output_dir+'new') cmd = 'sct_propseg -i ' + fname_input \ + ' -o ' + output_dir+'new' \ + ' -t ' + image_type \ + ' -detect-nii' sct.printv(cmd) status_propseg_new, output_propseg_new = commands.getstatusoutput(cmd) sct.printv(output_propseg_new) # check if spinal cord is correctly detected with new version of PropSeg cmd = "isct_check_detection.py -i "+detection_filename_new+" -t "+manual_segmentation_filename_new sct.printv(cmd) status_detection_new, output_detection_new = commands.getstatusoutput(cmd) sct.printv(output_detection_new) results_detection[1] = status_detection_new # compute Dice coefficient for new version of PropSeg cmd_validation = 'sct_dice_coefficient '+segmentation_filename_new \ + ' '+manual_segmentation_filename_new \ + ' -bzmax' sct.printv(cmd_validation) status_validation_new, output_validation_new = commands.getstatusoutput(cmd_validation) print output_validation_new res = output_validation_new.split()[-1] if res != 'nan': results_segmentation[1] = float(res) else: results_segmentation[1] = 0.0 return results_detection, results_segmentation if __name__ == "__main__": # call main function param = param() main()
Max Holloway watched UFC 222 from the couch, but not by choice. Holloway, the UFC’s reigning featherweight champion, was forced to withdraw from his March 3 title defense against Frankie Edgar after suffering an undisclosed ankle injury in training. Reflecting back on the situation Monday on The MMA Hour, Holloway said he was devastated by the news. His injury was not only a massive blow for the UFC, which had to scramble to find a replacement UFC 222 main event on one month’s notice, but also for himself, considering the stakes involved and Edgar’s history with Hawaiian legend B.J. Penn. “It was difficult, man. It was a tough pill to swallow, for sure,” Holloway told host Ariel Helwani on The MMA Hour. “I’ve never, ever pulled out of a fight in my life, not even in my amateur days. And for the first time to be that? So much history was on line. According to Holloway, his ankle injury was simply a matter of getting caught in an unlucky position in training rather than a lingering issue that slowly snuck up on him. Holloway said his physical therapy has been progressing well since and he’s hoping to receive doctor’s clearance to return to action soon. He’s been able to avoid surgery — and should be able to do so for good as long as his rehab continues to be effective at its current pace. “We’ve got one more doctor’s meeting before everything, in like a month or so, and we’ll see what happens,” Holloway said. “I don’t know when it is exactly, but that’s what we’re trying to do, we’re trying to get cleared. In the meantime, much has changed in Holloway’s absence. Holloway admitted to being surprised by the manner of Ortega’s victory, but he wasn’t shocked to see the jiu-jitsu wizard stamp his claim as the next title contender. “Ortega is undefeated for a reason,” Holloway said. “He’s good. He had knockouts on his record. I thought if he finished [Edgar], it was going to be a submission, because he’s pretty [skilled] there too and he’s super good on the ground. So I thought if it was a finish, it’d guaranteed be a submission. But he went out there and did his thing. Whenever it happens, a title fight between the 26-year-old Holloway and 27-year-old Ortega will be a true watershed moment for the UFC’s featherweight division, a battle between two young stars who have established their dominance over an older generation of 145-pound contenders. And Holloway agrees with Ortega that the matchup has the potential to be the biggest featherweight fight since Conor McGregor challenged Jose Aldo in 2015. “It’s huge,” Holloway said. “I’ll tell you this much, a lot of people are talking about this fight more than the Frankie fight, I feel like. So I can’t wait. Technique aside, Holloway noted that the one aspect of Ortega that has impressed him the most is the young challenger’s chin and toughness. “He can take a shot, so it should be a fun one,” Holloway said. The biggest question now is where and when Holloway vs. Ortega will take place, and the answer to that depends on two variables: Holloway’s health, plus the UFC’s ability to book a long-awaited event in Holloway’s home state of Hawaii. Local reports from Hawaiian news outlet KHON2 revealed earlier this month that negotiations between the UFC and the Hawaii Tourism Authority for an August event at Aloha Stadium had stalled, largely due to disagreements between the two parties on specific financial terms. 
Holloway heard the same reports, and while he’s still holding out hope that the issue can be resolved in time for August, he’s nonetheless confident that, at this point, UFC Hawaii is only a matter of time. “I’m over here waiting,” Holloway said. “I’m waiting just like you guys. “I think the talks with Hawaii have come a long way. This is the closest they’ve ever had to having a fight event down here, so we’ll see what happens. I don’t know, like how you said, that snag — usually when the NFL comes down [for the Pro Bowl], they give the NFL a $5 million compensation. So I think the UFC was trying to ask for that and then they kinda re-countered and they’re kinda just starting back-and-forth countering, so we’ll see what happens. I’m just glad that the HTA is actually taking the time to finally listen to the UFC and they’re finally talking about it. UFC 227 in Hawaii is the ultimate goal, but if it doesn’t come to fruition, Holloway said he would be just fine with a July return at UFC 226 instead. Still, though, everything depends on the success of his recovery.
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time

from telemetry.page.actions import page_action


class RepaintContinuouslyAction(page_action.PageAction):
  """Continuously repaints the visible content by requesting animation frames
  until self.seconds have elapsed AND at least three RAFs have been fired.
  Times out after max(60, self.seconds), if fewer than three RAFs were fired.
  """
  def __init__(self, attributes=None):
    super(RepaintContinuouslyAction, self).__init__(attributes)

  def RunAction(self, tab):
    assert(hasattr(self, 'seconds'))
    start_time = time.time()
    tab.ExecuteJavaScript(
        'window.__rafCount = 0;'
        'window.__rafFunction = function() {'
          'window.__rafCount += 1;'
          'window.webkitRequestAnimationFrame(window.__rafFunction);'
        '};'
        'window.webkitRequestAnimationFrame(window.__rafFunction);')

    time_out = max(60, self.seconds)
    min_rafs = 3

    # Wait until at least self.seconds have elapsed AND min_rafs have been
    # fired. Use a hard time-out after 60 seconds (or self.seconds).
    while True:
      raf_count = tab.EvaluateJavaScript('window.__rafCount;')
      elapsed_time = time.time() - start_time
      if elapsed_time > time_out:
        break
      elif elapsed_time > self.seconds and raf_count > min_rafs:
        break
      time.sleep(1)
Dr. Lorielle Alter was born in New York, but her family moved to South Florida and she became a sunshine-and-beach-loving Floridian at an early age. Dr. Alter graduated from Vanderbilt University and was the captain of Vanderbilt’s cheerleading squad. After graduating from Vanderbilt, Dr. Lorielle went on to Dental School at the University of Florida, where her passion grew for treating children, special needs patients, the medically compromised, and infants. She trained as a Pediatric Dental Specialist at UF, and earned her status as a Diplomate in the American Board of Pediatric Dentistry in 2015. She joined Dr. Maggie and our Team in 2017. Dr. Lorielle loves to travel with her husband, David and they enjoy exploring Tampa Bay’s culinary side. On the weekends, she can be seen running a 5k or cheering for her Florida Gators (except, of course when they play Vanderbilt…) Dr. Lorielle is an Auntie to 4 beautiful nieces and nephews who she loves to spoil! Dr. Maggie’s goal is to treat her patients as she would her own family, and believes every child should have positive dental experiences so they grow up loving their dentist! Dr. Maggie was raised in Southwest Florida by a close-knit family, and could always be found playing sports. She attended the University of Florida where she earned a Bachelor of Science degree in Microbiology and participated in many community events for kids. She continued at UF to pursue her dream of becoming a dentist. While in dental school, Dr. Maggie led many community service groups aimed at helping children. She participated in mission trips to the Dominican Republic where she provided dentistry in rural orphanages. Seeing children suffering from dental disease left a lasting impression. After graduating fourth in her class, she continued her journey as a Gator and served as Chief Resident during a residency program that specialized in pediatric dentistry. During that training she treated children with learning disabilities and complex medical conditions. In 2010, she became a Diplomate in the American Board of Pediatric Dentistry, a prestigious qualifying status that is earned by fewer than three percent of all dentists. She regularly attends Continuing Education conferences to ensure that she and her highly trained staff remain committed to excellence in pediatric dentistry and the latest in innovative technologies to enhance patient care as a pediatric dentist in Palm Harbor, FL. Dr. Maggie cherishes time spent with her three young boys, Colton, Camden, and Mason. She and her husband, Cliff, can be spotted around Tampa Bay with the boys, usually outdoors and on the water. The Davis family loves boating, fishing, biking, traveling and barbequing with friends.
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('contacts', '0014_auto_20170210_1659'), ] operations = [ migrations.AlterField( model_name='contact', name='group', field=models.ForeignKey(related_name='contacts', verbose_name='Cohort', to='groups.Group', help_text='Cohort to which this contact belongs.', null=True), ), migrations.AlterField( model_name='contact', name='groups', field=models.ManyToManyField(help_text='All cohorts to which this contact belongs.', related_name='all_contacts', verbose_name='Cohorts', to='groups.Group'), ), migrations.AlterField( model_name='contact', name='region', field=models.ForeignKey(related_name='contacts', verbose_name='Panel', to='groups.Region', help_text='Panel of this contact.'), ), ]
Take the largest, deepest saucepan you have and fill it with water. Bring to the boil (using the kettle speeds this process up) and add a pinch of salt and the spaghetti. Stir for the first 30 seconds to stop the pasta sticking and allow to cook for 10-12 minutes or until the pasta is al dente. For the sauce combine the diced onion with the garlic and a large glug of olive oil, adding in the star anises (don't forget to remove these when serving or warn people, they're hard and inedible). Allow to cook on a medium to low heat for 3-5 minutes until the onions turn translucent. Next add your seasoning followed by the chopped scallions and double cream and simmer on a medium to low heat for 5 minutes while the pasta cooks. To make the basil and almond Parmesan muddle all three ingredients until well combined. Once the pasta is cooked and you're happy with the seasoning of the sauce, add in the basil and almond Parmesan. Serve with a sprinkle of the lemon zest plus a drizzle of extra virgin olive oil and extra fresh black pepper.
# # System Imports # import unittest # import mock # from mock import patch # # ZeroMQ Imports # import zmq # from zmq.eventloop.ioloop import IOLoop # # Local Imports # import zmq_transport # from zmq_transport.server.zmq_server import ( # ServerSocket, # ApplicationHandler, # ClientHandler, # Publisher, # Server # ) # from zmq_transport.config.settings import ( # ENDPOINT_APPLICATION_HANDLER, # ENDPOINT_CLIENT_HANDLER, # ENDPOINT_PUBLISHER # ) # class BaseServerTest(unittest.TestCase): # """ # Base test class for server. # """ # def __init__(self, *args, **kwargs): # unittest.TestCase.__init__(self, *args, **kwargs) # self.endpoint = "tcp://127.0.0.1:6789" # def tearDown(self): # pass # class ServerSocketTest(BaseServerTest): # """ # Test class for zmq_transport.server.zmq_server.ServerSocket # """ # def test_serverSocket_run(self): # """ # Tests ServerSocket.run # """ # # Mocking zmq # context = mock.Mock(spec_set=zmq.Context) # socket = mock.Mock(spec_set=zmq.ROUTER) # server_sock = ServerSocket(context.socket(socket), self.endpoint) # server_sock.run() # class ApplicationHandlerTest(BaseServerTest): # """ # Test class for zmq_transport.server.zmq_server.ApplicationHandler # """ # def test_applicationHandler_run(self): # """ # Tests ApplicationHandler.run # """ # # Mocking zmq # context = mock.Mock(spec_set=zmq.Context) # app_sock = ApplicationHandler(self.endpoint, context) # app_sock.run() # class ClientHandlerTest(BaseServerTest): # """ # Test class for zmq_transport.server.zmq_server.ClientHandler # """ # def test_clientHandler_run(self): # """ # Tests ClientHandler.run # """ # # Mocking zmq # context = mock.Mock(spec_set=zmq.Context) # client_sock = ClientHandler(self.endpoint, context) # client_sock.run() # class PublisherTest(BaseServerTest): # """ # Test class for zmq_transport.server.zmq_server.Publisher # """ # def test_publisher_run(self): # """ # Tests Publisher.run # """ # # Mocking zmq # context = mock.Mock(spec_set=zmq.Context) # pub_mock = Publisher(self.endpoint, context) # pub_mock.run() # class ServerTest(BaseServerTest): # """ # Test class for zmq_transport.server.zmq_server.Server # """ # def setUp(self): # self.frontend_patcher = patch( # "zmq_transport.server.zmq_server.ClientHandler") # self.backend_patcher = patch( # "zmq_transport.server.zmq_server.ApplicationHandler") # self.publisher_patcher = patch( # "zmq_transport.server.zmq_server.Publisher") # self.frontend_mock = self.frontend_patcher.start() # self.backend_mock = self.backend_patcher.start() # self.publisher_mock = self.publisher_patcher.start() # def check_mocks(self, server): # # Check if classes were correctly mocked. # assert zmq_transport.server.zmq_server.ClientHandler is\ # self.frontend_mock # assert zmq_transport.server.zmq_server.ApplicationHandler is\ # self.backend_mock # assert zmq_transport.server.zmq_server.Publisher is\ # self.publisher_mock # assert server.frontend is self.frontend_mock # assert server.backend is self.backend_mock # assert server.publisher is self.publisher_mock # def test_server__prepare_reactor(self): # """ # Tests Server._prepare_reactor # """ # server = Server(ENDPOINT_APPLICATION_HANDLER, ENDPOINT_CLIENT_HANDLER, # ENDPOINT_PUBLISHER) # # Patch Server instance. 
# server._context = mock.Mock(spec_set=zmq.Context) # server._loop = mock.Mock(spec_set=IOLoop) # server.frontend = self.frontend_mock # server.backend = self.backend_mock # server.publisher = self.publisher_mock # self.check_mocks(server) # with patch("zmq.eventloop.zmqstream.ZMQStream") as zmqstream_mock: # server._prepare_reactor() # # TODO: Check if zmqstream_mock is called withing wrap_zmqstream # self.assertEqual(server.frontend.wrap_zmqstream.called, True) # self.assertEqual(server.publisher.wrap_zmqstream.called, True) # self.assertEqual(server.backend.wrap_zmqstream.called, True) # expected = [(("on_send", server.handle_snd_update_client), ), # (("on_recv", server.handle_rcv_update_client), )] # self.assertEqual(server.frontend.register_handler.call_args_list, # expected) # expected = [(("on_send", server.handle_snd_update_client), )] # self.assertEqual(server.publisher.register_handler.call_args_list, # expected) # expected = [(("on_send", server.handle_snd_update_app), ), # (("on_recv", server.handle_rcv_update_app), )] # self.assertEqual(server.backend.register_handler.call_args_list, # expected) # def tearDown(self): # self.frontend_patcher.stop() # self.backend_patcher.stop() # self.publisher_patcher.stop()
Two twin sisters gatecrashed a wedding banquet and repaid the happy couple by stealing a piece of luggage from the bride. Tiffany and Tiziana Kang, 30, joined the celebrations at the Shin Yeh restaurant in Liang Court on Nov 19 last year, even though they had not been invited and did not know anyone there. After enjoying the dinner, they were queueing to give their blessings to the couple - Mr Jamie Chew, 30, and Ms Tan Huiyan, 26 - when they passed a table where a groomsman was standing. Tiffany lied to him that she had been instructed to collect a pink piece of luggage belonging to the bride. It contained items such as coffee mugs and pens, as well as a roll of film, worth a total of around $150. Ms Tan later discovered that it had gone missing and that the twins were uninvited guests. She then found the sisters at Liang Court's taxi stand and confronted them. Tiffany claimed that she thought the piece of luggage was a lost item, and then attempted to leave in a taxi, but she was stopped by Mr Chew. Yesterday, she was sentenced to 10 days in jail and Tiziana, five days. It was revealed in court that earlier on the day of the wedding, the twins had stolen three handbags and a wallet, worth a total of $640, from a Guess Accessories store in Bugis Junction. Despite the items having sensor tags attached, the twins somehow managed to sneak them out of the store. In mitigation, it was revealed that the sisters were adopted as children and had a tumultuous relationship with their foster parents. Both were also diagnosed with a psychiatric condition at age 13, although the nature of this condition was not revealed in court. They had also had an argument with their family members on the day of the crime and left the house in a distressed state before they began their stealing spree. Deputy Public Prosecutor Rachel Tan said that while the twins had a history of mental illness, their condition "did not contribute to the crime". For theft, the duo could have faced up to seven years in jail and been liable to a fine.
# -*- coding: utf-8 -*- import requests import sqlite3 from distutils.version import LooseVersion import re from bs4 import BeautifulSoup import click from tqdm import tqdm import dominate from dominate.tags import * PORTAL_NAME = 'http://soft.mydiv.net' DOWNLOAD_COM_SEARCH = 'http://download.cnet.com/1770-20_4-0.html?platform=Windows&searchtype=downloads&query=' SOFTPEDIA_SEARCH = 'http://win.softpedia.com/dyn-search.php?search_term=' def unique(seq): return list(set(seq)) def get_programs_from_section(url): result = [] soup = BeautifulSoup(download_page(url), "html.parser") if not soup: print("parse_site no soup!") return result for page_url in tqdm(get_section_pages(soup, url), desc='Parsing pages'): ps = BeautifulSoup(download_page(page_url), "html.parser") if not ps: continue for item in ps.findAll('a', {'class': 'itemname'}): try: result.append((PORTAL_NAME + item['href'], item.contents[0].strip(), item.span.string)) except (LookupError, AttributeError): continue return result def save_program_to_db(site, program, version, sql_connection): sql_connection.cursor().execute( "INSERT INTO parsed(site, program, version) VALUES(?, ?, ?)", [site, program, version]) def get_section_pages(soup, url): pages = [] page_nums = [] for raw_a in soup.findAll('td', {'class': 'page'}): if not raw_a.text: continue page_num_text = raw_a.text if page_num_text.encode('utf-8').strip() == u'···'.encode('utf-8').strip(): pass else: page_num = int(page_num_text) if page_nums and (page_num - page_nums[-1]) > 1: for i in range(page_nums[-1], page_num + 1): pages.append(url + 'index' + str(i) + ".html") page_nums.append(page_num) pages.append(PORTAL_NAME + str(raw_a.a['href'])) pages = unique(pages) pages.append(url) return pages def search_new_versions_by_db(sql_connection, engine): for sql_row in tqdm(list(sql_connection.cursor().execute("SELECT program, version, site FROM parsed")), desc='Finding updates'): if len(sql_row) < 3: continue target_name, target_version, target_url = sql_row search_page_soup = BeautifulSoup(download_page(engine + target_name), "html.parser") if not search_page_soup: continue yield search_page_soup, target_name, target_version, target_url def compare_versions_download_com(sql_connection, list_params, ver_params, content_index=None): for search_page_soup, target_name, target_version, target_url in search_new_versions_by_db(sql_connection, DOWNLOAD_COM_SEARCH): search_results_soup = search_page_soup.findAll(list_params[0], list_params[1]) for result in search_results_soup[:2]: title = result.findAll('div', {'class': 'title OneLinkNoTx'}) if not title: continue found_name = title[0].string found_url = result.a['href'] if target_name.lower() == found_name.lower(): found_page_soup = BeautifulSoup(download_page(found_url), "html.parser") if not found_page_soup: continue if content_index: found_version = found_page_soup.find(ver_params[0], ver_params[1]).contents[content_index] else: found_version = found_page_soup.find(ver_params[0], ver_params[1]) if found_version: found_version = found_version.string if not target_version or not found_version: continue yield target_name, target_version, found_name, found_version, target_url, found_url def get_next_proxy(): while True: with open("proxy.list", 'r') as f: proxy_list = f.readlines() for proxy in proxy_list: yield f'http://{proxy}'.strip() def compare_versions_softpedia(sql_connection, list_params): for search_page_soup, target_name, target_version, target_url in search_new_versions_by_db(sql_connection, SOFTPEDIA_SEARCH): for result in 
search_page_soup.findAll(list_params[0], list_params[1])[:2]: found_name = result.a.string found_url = result.a['href'] if target_name.lower() == " ".join(found_name.lower().split(' ')[:-1]): found_page_soup = BeautifulSoup(download_page(found_url), "html.parser") if not found_page_soup: continue found_version = None pattern = re.compile('var spjs_prog_version="(.*?)";') scripts = found_page_soup.findAll('script') for script in scripts: match = pattern.search(str(script.string)) if match: found_version = match.groups()[0] if not target_version or not found_version: continue yield target_name, target_version, found_name, found_version, target_url, found_url def download_page(page_url, num_tries=3, timeout=5, proxy={}, proxy_generator=get_next_proxy()): def change_proxy(message): proxy_address = next(proxy_generator) print(f'{message}. Changing proxy to {proxy_address}') proxy['http'] = proxy_address found_page = '' for _ in range(num_tries): try: found_page = requests.get(page_url, proxies=proxy, timeout=timeout).text except requests.exceptions.Timeout: change_proxy("Timeout") continue except requests.exceptions.ProxyError: change_proxy("Proxy error") continue if not len(found_page): change_proxy("Probably banned") else: break return found_page @click.command() @click.option('--section_url', default='http://soft.mydiv.net/win/cname72/', help='MyDiv section URL.') @click.option('--engine', default='softpedia', help='Where to search') def parse_section(section_url, engine): with sqlite3.connect('example.db') as sql_connection: clear_db(sql_connection) for site, program, version in get_programs_from_section(section_url): save_program_to_db(site, program, version, sql_connection) sql_connection.commit() if engine == 'softpedia': results = compare_versions_softpedia(sql_connection, ('h4', {'class': 'ln'})) elif engine == 'download.com': results = compare_versions_download_com(sql_connection, ('div', {'id': 'search-results'}), ('tr', {'id': 'specsPubVersion'}), 3) else: print("Unknown engine") return 1 create_html_results(engine, results) def clear_db(sql_connection): sql_connection.cursor().execute("DELETE FROM parsed") sql_connection.commit() def create_html_results(engine, results): with dominate.document(title=engine) as doc: with doc.add(table()) as data_table: attr(border=2) table_header = tr() table_header += th("MyDiv") table_header += th("Version") table_header += th("Search result") table_header += th("Version") data_table.add(table_header) try: for target_name, target_version, found_name, found_version, target_url, found_url in results: try: if LooseVersion(target_version.split()[0]) < LooseVersion(found_version.split()[0]): data_row = tr() data_row += td(a(target_name, href=target_url)) data_row += td(target_version) data_row += td(a(found_name, href=found_url)) data_row += td(found_version) data_table.add(data_row) print("On MyDiv %s %s, on search %s %s " % (target_name, target_version, found_name, found_version)) except TypeError: print(f"Version comparison failed on {target_version} and {found_version}") finally: with open(engine + ".html", "w") as f: f.write(doc.render()) def _main(): parse_section() if __name__ == '__main__': exit(_main())
The 2013 Beaujolais Nouveau will be released Thursday, November 21. This date is mandated by French law, which states that the year’s Beaujolais Nouveau may be released at 12:01 a.m. on the third Thursday of November. Expect Beaujolais Nouveau to have a bright red-violet hue, light body, almost no tannins and strong fruity flavors reminiscent of candied cherries, red plums and licorice. To enhance these flavors and tame acidity, Beaujolais Nouveau should be served slightly chilled.
import h5py import numpy as np from .results import Results, VERSION_RESULTS from openmc.checkvalue import check_filetype_version, check_value __all__ = ["ResultsList"] class ResultsList(list): """A list of openmc.deplete.Results objects It is recommended to use :meth:`from_hdf5` over direct creation. """ @classmethod def from_hdf5(cls, filename): """Load in depletion results from a previous file Parameters ---------- filename : str Path to depletion result file Returns ------- new : ResultsList New instance of depletion results """ with h5py.File(str(filename), "r") as fh: check_filetype_version(fh, 'depletion results', VERSION_RESULTS[0]) new = cls() # Get number of results stored n = fh["number"][...].shape[0] for i in range(n): new.append(Results.from_hdf5(fh, i)) return new def get_atoms(self, mat, nuc, nuc_units="atoms", time_units="s"): """Get number of nuclides over time from a single material .. note:: Initial values for some isotopes that do not appear in initial concentrations may be non-zero, depending on the value of :class:`openmc.deplete.Operator` ``dilute_initial``. The :class:`openmc.deplete.Operator` adds isotopes according to this setting, which can be set to zero. Parameters ---------- mat : str Material name to evaluate nuc : str Nuclide name to evaluate nuc_units : {"atoms", "atom/b-cm", "atom/cm3"}, optional Units for the returned concentration. Default is ``"atoms"`` .. versionadded:: 0.12 time_units : {"s", "min", "h", "d"}, optional Units for the returned time array. Default is ``"s"`` to return the value in seconds. .. versionadded:: 0.12 Returns ------- times : numpy.ndarray Array of times in units of ``time_units`` concentrations : numpy.ndarray Concentration of specified nuclide in units of ``nuc_units`` """ check_value("time_units", time_units, {"s", "d", "min", "h"}) check_value("nuc_units", nuc_units, {"atoms", "atom/b-cm", "atom/cm3"}) times = np.empty_like(self, dtype=float) concentrations = np.empty_like(self, dtype=float) # Evaluate value in each region for i, result in enumerate(self): times[i] = result.time[0] concentrations[i] = result[0, mat, nuc] # Unit conversions if time_units == "d": times /= (60 * 60 * 24) elif time_units == "h": times /= (60 * 60) elif time_units == "min": times /= 60 if nuc_units != "atoms": # Divide by volume to get density concentrations /= self[0].volume[mat] if nuc_units == "atom/b-cm": # 1 barn = 1e-24 cm^2 concentrations *= 1e-24 return times, concentrations def get_reaction_rate(self, mat, nuc, rx): """Get reaction rate in a single material/nuclide over time .. note:: Initial values for some isotopes that do not appear in initial concentrations may be non-zero, depending on the value of :class:`openmc.deplete.Operator` ``dilute_initial`` The :class:`openmc.deplete.Operator` adds isotopes according to this setting, which can be set to zero. Parameters ---------- mat : str Material name to evaluate nuc : str Nuclide name to evaluate rx : str Reaction rate to evaluate Returns ------- times : numpy.ndarray Array of times in [s] rates : numpy.ndarray Array of reaction rates """ times = np.empty_like(self, dtype=float) rates = np.empty_like(self, dtype=float) # Evaluate value in each region for i, result in enumerate(self): times[i] = result.time[0] rates[i] = result.rates[0].get(mat, nuc, rx) * result[0, mat, nuc] return times, rates def get_eigenvalue(self): """Evaluates the eigenvalue from a results list. Returns ------- times : numpy.ndarray Array of times in [s] eigenvalues : numpy.ndarray k-eigenvalue at each time. 
Column 0 contains the eigenvalue, while column 1 contains the associated uncertainty """ times = np.empty_like(self, dtype=float) eigenvalues = np.empty((len(self), 2), dtype=float) # Get time/eigenvalue at each point for i, result in enumerate(self): times[i] = result.time[0] eigenvalues[i] = result.k[0] return times, eigenvalues def get_depletion_time(self): """Return an array of the average time to deplete a material .. note:: Will have one fewer row than number of other methods, like :meth:`get_eigenvalues`, because no depletion is performed at the final transport stage Returns ------- times : numpy.ndarray Vector of average time to deplete a single material across all processes and materials. """ times = np.empty(len(self) - 1) # Need special logic because the predictor # writes EOS values for step i as BOS values # for step i+1 # The first proc_time may be zero if self[0].proc_time > 0.0: items = self[:-1] else: items = self[1:] for ix, res in enumerate(items): times[ix] = res.proc_time return times
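A brief usage sketch of the class above; the file name, material label and nuclide are placeholders chosen for illustration.

# Hedged usage sketch for ResultsList; "depletion_results.h5", material "1"
# and nuclide "Xe135" are illustrative placeholders.
from openmc.deplete import ResultsList

results = ResultsList.from_hdf5("depletion_results.h5")

# Nuclide concentration over time for one material, in atom/b-cm and days
days, xe135 = results.get_atoms("1", "Xe135", nuc_units="atom/b-cm", time_units="d")

# k-eigenvalue (column 0) and its uncertainty (column 1) at each time point
times, k = results.get_eigenvalue()
print(days, xe135)
print(k[:, 0], k[:, 1])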
Visiting Paris is a luxurious opportunity to soak in Parisian culture, history and, most importantly, food. If you're a big foodie and want to broaden your knowledge of French cuisine while you're in the country, then you may want to consider French food tours or cooking classes. Food tours are a display of the culture's varied cuisine. They range in price and usually last a few hours. People on the tour will see real, genuine local Parisian food from local bakeries and restaurants rather than the basic tourist-focused places. Those on the tour may get a chance to sample the local food, learn about the local history and get stuck in to some workshops. The Foodist is a Parisian company offering tours in Paris that focus on different parts of French cuisine, including chocolate, cheese, gourmet food and bread. The private tours here typically last for two hours and cost around €260 for two adults, with a €40 deposit to pay. A tour of an authentic French market is also available through another company. After a walk through the French market and a look at some of the beautiful sights of Paris, a cooking class follows using fresh French ingredients. This tour lasts for four hours and costs €165 per person. If you'd also like to experience cooking classes after a wonderful tour around Paris, there are many different options. Learn how to perfect croissants in just three hours; you'll get to take your croissants home afterwards! Or you could learn how to master French sauces within three hours: vinaigrette, mayonnaise, port wine sauce, red wine and shallot sauce, chocolate sauce and more. You could always take on the challenging macaron if you're brave enough. Within three hours, learn how to use Italian and French meringue to produce authentic macarons. You'll also become familiar with the traditional fillings: buttercream, ganache and compote.
# -*- coding: utf-8 -*- # # AWL simulator - instructions # # Copyright 2012-2014 Michael Buesch <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # from __future__ import division, absolute_import, print_function, unicode_literals from awlsim.common.compat import * from awlsim.core.instructions.main import * #@nocy from awlsim.core.operators import * #from awlsim.core.instructions.main cimport * #@cy class AwlInsn_SSD(AwlInsn): #+cdef __slots__ = () def __init__(self, cpu, rawInsn): AwlInsn.__init__(self, cpu, AwlInsn.TYPE_SSD, rawInsn) self.assertOpCount((0, 1)) if self.ops: self.ops[0].assertType(AwlOperator.IMM, 0, 255) def run(self): #@cy cdef S7StatusWord s s = self.cpu.statusWord accu1 = self.cpu.accu1.getSignedDWord() if self.ops: count = self.ops[0].value else: count = self.cpu.accu2.getByte() if count <= 0: return count = min(count, 32) s.A1, s.A0, s.OV = (accu1 >> (count - 1)) & 1, 0, 0 accu1 >>= count self.cpu.accu1.setDWord(accu1)
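As a rough illustration of what run() computes (not part of awlsim), the following standalone sketch reproduces the shift semantics on a plain signed integer; ssd_shift is a hypothetical helper name used only for this example.

def ssd_shift(accu1_signed, count):
    """Arithmetic right shift of a signed 32-bit value, returning the new
    accumulator contents and the last bit shifted out (which run() stores in A1)."""
    count = min(count, 32)
    if count <= 0:
        return accu1_signed & 0xFFFFFFFF, None
    last_bit = (accu1_signed >> (count - 1)) & 1            # bit that ends up in A1
    return (accu1_signed >> count) & 0xFFFFFFFF, last_bit   # Python's >> preserves the sign

# e.g. ssd_shift(-8, 2) -> (0xFFFFFFFE, 0): the sign bit is replicated into the result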
Kensington Publishing, with editor Lin Carter, revived the pulp in 1980. To bring it out of the pulp stigma, they turned to the paperback format. Like Moskowitz before him, Carter only made it to four issues. Unlike the second incarnation, which faltered for lack of interest, the paperback project fell afoul of contractual problems over the title and ceased operation. Title rights would plague potential publishers and editors for some time to come.

The House Without Mirrors..........David H. Keller, M.D.

WEIRD TALES was the first and most famous of all the fantasy-fiction pulp magazines. It featured tales of the strange, the marvelous, and the supernatural by the finest authors of the macabre and the fantastic, old and new, from its first issue in 1923 until its 279th and last consecutive issue in 1954. Now it is back, with all new stories, and even such an exciting find as Scarlet Tears, a recently discovered and never-before-published novelette by Robert E. Howard. Over the years many great writers were published in the pages of WEIRD TALES, and now a great tradition is being continued into its second half-century.

The Song of the Gallows Tree..........Robert E. Howard
To the Nightshade..........Clark Ashton Smith
There Are No Ghosts in Catholic Spain..........Ray Bradbury

There were no issues in the year 1982.
import pickle, sys
sys.path.append('../src')
import data_io, sim_algo, eval, params

## run
wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from John Wieting's github (https://github.com/jwieting/iclr2016)
    '../data/glove.840B.300d.txt' # need to download it first
    ]
weightfile = '../auxiliary_data/enwiki_vocab_min200.txt'
weightparas = [-1, 1e-3]  # [-1, 1e-1, 1e-2, 1e-3, 1e-4]
rmpcs = [0, 1]  # [0, 1, 2]

params = params.params()
parr4para = {}
sarr4para = {}
for wordfile in wordfiles:
    (words, We) = data_io.getWordmap(wordfile)
    for weightpara in weightparas:
        word2weight = data_io.getWordWeight(weightfile, weightpara)
        weight4ind = data_io.getWeight(words, word2weight)
        for rmpc in rmpcs:
            print('word vectors loaded from %s' % wordfile)
            print('word weights computed from %s using parameter a=%f' % (weightfile, weightpara))
            params.rmpc = rmpc
            print('remove the first %d principal components' % rmpc)
            ## eval just one example dataset
            parr, sarr = eval.sim_evaluate_one(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
            ## eval all datasets; need to obtain the datasets from John Wieting (https://github.com/jwieting/iclr2016)
            # parr, sarr = eval.sim_evaluate_all(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
            paras = (wordfile, weightfile, weightpara, rmpc)
            parr4para[paras] = parr
            sarr4para[paras] = sarr

## save results
save_result = False  # True
result_file = 'result/sim_sif.result'
comment4para = [  # need to align with the loop above
    ['word vector files', wordfiles],  # comments and values
    ['weight parameters', weightparas],
    ['remove principal component or not', rmpcs]
    ]
if save_result:
    with open(result_file, 'wb') as f:
        pickle.dump([parr4para, sarr4para, comment4para], f)
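The word weights the script loads through data_io.getWordWeight follow the SIF scheme weight(w) = a / (a + p(w)) from Arora et al. (2017). A self-contained sketch of that computation, assuming the weight file holds one "word count" pair per line, might look like this:

def sif_weights(weightfile, a=1e-3):
    # estimate unigram probabilities from the count file, then apply a / (a + p(w))
    counts = {}
    with open(weightfile) as fh:
        for line in fh:
            word, count = line.split()
            counts[word] = float(count)
    total = sum(counts.values())
    return {w: a / (a + c / total) for w, c in counts.items()}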
Don't let your stomach's growls scare the people around you; fill up at Tian Fang instead. Choose from an Oshi aglio olio or a Porku carbonara, then savour nasi lemak roast pork and Geng Chow pork belly bao. End the meal with the crispy chocolate punch dessert. Treat a loved one to this meal for two and enjoy great company over good food. Valid Mon – Fri: 11am – 10pm, Sat – Sun: 9am – 10pm. Valid on public holidays.
import pprint
import logging

import viv_utils
import viv_utils.emulator_drivers


g_pp = pprint.PrettyPrinter()


class CallArgumentMonitor(viv_utils.emulator_drivers.Monitor):
    """ collect call arguments to a target function during emulation """

    def __init__(self, vw, target_fva):
        """ :param target_fva: address of function whose arguments to monitor """
        viv_utils.emulator_drivers.Monitor.__init__(self, vw)
        self._fva = target_fva
        self._calls = {}

    def apicall(self, emu, op, pc, api, argv):
        rv = self.getStackValue(emu, 0)
        if pc == self._fva:
            self._calls[rv] = argv

    def getCalls(self):
        """ get map of return value of function call to arguments to function call """
        return self._calls.copy()


def emulate_function(vw, fva, target_fva):
    """ run the given function while collecting arguments to a target function """
    emu = vw.getEmulator()
    d = viv_utils.emulator_drivers.FunctionRunnerEmulatorDriver(emu)

    m = CallArgumentMonitor(vw, target_fva)
    d.add_monitor(m)

    d.runFunction(fva, maxhit=1)

    for k, v in m.getCalls().items():
        print(hex(k) + ": " + str(v))


def _main(bin_path, ofva):
    fva = int(ofva, 0x10)
    logging.basicConfig(level=logging.DEBUG)

    vw = viv_utils.getWorkspace(bin_path)

    index = viv_utils.InstructionFunctionIndex(vw)

    # optimization: avoid re-processing the same function repeatedly
    called_fvas = set()

    for callerva in vw.getCallers(fva):
        callerfva = index[callerva]  # the address of the function that contains this instruction
        if callerfva in called_fvas:
            continue

        emulate_function(vw, index[callerva], fva)

        called_fvas.add(callerfva)

    return


def main():
    import sys
    sys.exit(_main(*sys.argv[1:]))


if __name__ == "__main__":
    main()
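For programmatic use outside the command-line entry point, something along these lines should work; the binary path and the two addresses below are placeholders, not values tied to any real sample.

vw = viv_utils.getWorkspace("sample.exe")                  # hypothetical binary
emulate_function(vw, fva=0x401000, target_fva=0x401A30)    # caller function and monitored callee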
San Jose, Netsuite And Acumatica Competitor-Cloud ERP For Sale On BizBen. For Best Results & Response, Email & Phone This Contact: Rick Carlson at 833-776-6682 x786.

According to the United States Department of Trade, Small and Medium Sized Enterprises (SMEs) are the backbone of the economy in the United States: nearly half of the US private workforce (47.5%) is employed by a small to medium sized business. While early ERP systems were offered only as On-Premise deployments, for the majority of SMEs this model was not priced attractively enough to justify its benefit. Additionally, ERP systems are challenging to configure, and still today almost all of the available solutions (Cloud or On-Premise) are tailored for large enterprises (typically Fortune 1000 companies and companies at or above $300M in annual revenue). With the emergence of the internet and far more efficient Cloud infrastructures, it is only a matter of time before a majority of SMEs adopt an ERP solution, and they will only consider those that are Cloud based. In addition, NetSuite, as the Cloud ERP pioneer, has proven the market's receptiveness to a Cloud ERP offering, while simultaneously leaving many SMEs unfulfilled or left behind in their need for an effective, easy-to-use and adequately priced ERP system.

Overall, an ERP solution is inherently difficult to create; in transitioning to the Cloud, the software must be designed, coded and re-written from scratch, creating a very strong barrier to entry for a new market participant. The Company has spent 6+ years and nearly $7MM creating this fully integrated, end-to-end ERP software, now ready for wide-scale roll-out and implementation. As a generation of business owners and users (Millennials) that views SaaS solutions as conventional becomes less accustomed to On-Premise deployments and more comfortable with solutions based in the Cloud, this solution - built on current technologies, a state-of-the-art Cloud infrastructure, three decades of ERP experience and a deep acumen of the wholesale distribution marketplace - offers an opportunity to capture a major portion of the SME market, if not become the leader in Cloud-based ERP solutions for SMEs.

In 2016 the Company began a Channel Development initiative, credentialing partnerships with global firms and value-added resellers to drive growth; so far, partnerships with a number of well-positioned firms on an international scale have either been established or are in progress. An NDA is required for the comprehensive CIM and any other due diligence documents on file with us. Reason for Selling: the comprehensive package on file covers this; an NDA is required.

To get more information about this Netsuite And Acumatica Competitor-Cloud ERP business for sale in San Jose, Santa Clara County, please phone Rick Carlson (Agent - BRE License #:01820673) at 833-776-6682 x786 - if you get voicemail, please leave a detailed message - and make sure you mention you saw this posting/ad #247023 on BizBen.com. In addition to phoning Rick Carlson, make sure you email Rick Carlson above to get more info about this Netsuite And Acumatica Competitor-Cloud ERP, to ask any questions, or to request an appointment to see this business for sale, franchise, or opportunity. Thank you.
import os
import shutil

import jinja2

_ROOT = os.path.abspath(os.path.dirname(__file__))


def template_string(template, values):
    """Render the template at `template` (resolved relative to this module) with `values`."""
    template = os.path.join(_ROOT, template)
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(os.path.dirname(template))
    )
    with open(template, 'r', encoding='utf-8') as f:
        t = env.from_string(f.read())
    return t.render(values)


def template_file(template, values, out):
    """Render a single template and write the result to `out`, creating parent directories as needed."""
    template = os.path.join(_ROOT, template)
    r = template_string(template, values)

    path = os.path.dirname(out)
    if not os.path.exists(path):
        os.makedirs(path)

    with open(out, 'w', encoding='utf-8') as f:
        f.write(r + "\n")


def template_dir(dir, values, out):
    """Recursively render every *.j2 file under `dir` into `out`, copying non-template
    files as-is. Returns the list of paths written."""
    dir = os.path.join(_ROOT, dir)
    templated_files = []

    for root, dirs, files in os.walk(dir):
        for name in files:
            path = os.path.join(root, name)
            out_path = out + path.replace(dir, '')
            out_dir = os.path.dirname(out_path)

            if not os.path.exists(out_dir):
                os.makedirs(out_dir)

            if name.endswith(".j2"):
                out_path = out_path.replace('.j2', '')
                template_file(path, values, out_path)
            else:
                if os.path.exists(out_path):
                    os.remove(out_path)
                shutil.copy(path, out_path)

            templated_files.append(out_path)

    return templated_files
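A short, illustrative use of the three helpers; the template paths and output locations below are hypothetical and are resolved relative to this module's directory (_ROOT):

values = {"host": "localhost", "port": 8080}

text = template_string("templates/config.ini.j2", values)             # rendered string
template_file("templates/config.ini.j2", values, "build/config.ini")  # single rendered file
written = template_dir("templates/site", values, "build/site")        # whole tree; returns paths written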
Opened in October 2016 after several months of construction, Just South of North Massage Therapy is located in Thompson Township/Esko, MN. The peaceful rural location provides an extra boost to your level of relaxation before you even enter. Parking is conveniently located right in front of the door.

I loved Just South of North and I will definitely be back.

The custom session is built to address your needs by combining therapeutic modalities to help achieve your goals.

Pediatric massage is designed for children ages 2-17 years. This no-charge consultation is a time for us to meet and discuss a plan that would be best for your child. Designed for children ages 2-17 years, this modality may include movement, music, reading, or games as needed to make your child comfortable with touch/massage. Parents are required to be present at every session. Please schedule a consult if this is your child's first visit.

This is to schedule a time if you need to pick up gift certificates. I am not always in my office, and when I am I may be with another client. Scheduling a time for pick-up allows me to be there for you.

Service Received: Custom Therapeutic Massage with Teresa S.

Service Received: Hot Stone Massage with Teresa S. I have TMJ and had just been to the dentist; she got my jaw to relax and the headache associated with it went away!

Service Received: Reiki with Teresa S. Reiki is very relaxing and calming! Teresa is very nice and friendly as well!

She explained what she was going to do before the massage. She was very knowledgeable about my sore areas and did not hurt me when she was working on them. I will definitely be going again!

My massage was amazing. I will definitely be back!

Service Received: Bodywork with Teresa S. I must do this more regularly for myself. I look forward to my next appointment. It's wonderful therapy! Thank you Teresa!

I always leave so relaxed and ready to sleep!

Teresa is great! She always leaves me with a tip or stretch to work on my trouble areas! Would love peppermint oil!

You were amazing and I will def be back!

Everything was great. You do an excellent job. Thank you! Hope to see you again in the future.

Teresa provided a reiki session for me. I felt very relaxed; what felt like 15 minutes was actually an hour. It is difficult to describe how I felt the rest of the day, other than settled. I felt very settled. Thank you, it was a wonderful experience!

Very nice facility, and she explained everything I needed to know very well. Overall a great experience, and I will go back again.

Professional, relaxing, and a great atmosphere. Hot stone massage is my favorite. It really relieved the tension. Highly recommend!

Thank you for the wonderful massage! Teresa was very easy to book and was thorough in getting background information prior to the massage. Her business is beautifully set up with aromatherapy, soothing music, and comfy linens and table - very soothing. She was excellent with the massage and professional in her analysis of what areas needed extra care. I am grateful for getting this massage time. I plan to return for more massages. Thanks Teresa!

I liked the fact that I was able to have just my problem areas worked on.

Very clean and nice office and treatment room. Very calming atmosphere for massage, with muted lights and spa music.

I've been several times; each time the massage was tailored for my needs at the time, and each was very professional and relaxing. I will be going back!
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
tip : 1. //       select matching nodes anywhere in the document (used here to locate the starting element)
tip : 2. /        step down one level to child nodes
tip : 3. /text()  extract the text content, i.e. convert the node's content to str
tip : 4. /@xxx    extract the value of attribute xxx
"""

from lxml import etree
import requests
import logging

logging.basicConfig(level=logging.INFO)

html = requests.get('http://www.qiushibaike.com')
# html2 = requests.request('GET', 'http://www.qiushibaike.com')
# print(html.text)

selector = etree.HTML(html.text)
content = selector.xpath('//*[@id="content-left"]/div')
print(type(content))
print(len(content))
print('------')

for each in content:
    # look for the "content" div inside each of the divs found above;
    # note that the extracted text is split into separate pieces wherever
    # a child node (<br> etc.) appears
    cc = each.xpath('div[@class="content"]/text()')
    if len(cc) >= 1:
        dd = []
        for piece in cc:
            piece = piece.strip('\n')
            dd.append(piece)
        print('\n'.join(dd))

# scratch notes on the page structure:
# //*[@id="qiushi_tag_115320313"]  //*[@id="content-left"]
# //*[@id="qiushi_tag_115320313"]/div[2]  article block untagged mb15  //*[@id="qiushi_tag_115320153"]/div[2]


# xpath practice
lhtml = '''
<?xml version="1.0" encoding="ISO-8859-1"?>

<bookstore>

<book>
  <title lang="eng">Harry Potter</title>
  <price>29.99</price>
</book>

<book>
  <title lang="eng">Learning XML</title>
  <price>39.95</price>
</book>

</bookstore>
'''

lselector = etree.HTML(lhtml)

content1 = lselector.xpath('bookstore')       # a bare name selects children of the current node with that name
# content1 = lselector.xpath('/html')         # select from the root node (absolute path); rarely needed
# content1 = lselector.xpath('//section')     # // selects matching nodes anywhere, regardless of position
# content1 = lselector.xpath('//section/.')   # . selects the current node
# content1 = lselector.xpath('//section/..')  # .. selects the parent node
print(type(content1))
print(len(content1))
for each in content1:
    print(each.text)
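To round off the tips in the module docstring, /text() and /@attr can be exercised on the same bookstore markup parsed above:

titles = lselector.xpath('//book/title/text()')  # ['Harry Potter', 'Learning XML']
langs = lselector.xpath('//book/title/@lang')    # ['eng', 'eng']
prices = lselector.xpath('//book/price/text()')  # ['29.99', '39.95']
for title, lang, price in zip(titles, langs, prices):
    print('%s (%s): %s' % (title, lang, price))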
Jerry Botha is the managing partner of Tax Consulting SA, a fully independent firm of tax practitioners, admitted attorneys and chartered accountants in South Africa. Jerry holds his own FSB License, is a Master Reward Practitioner and serves on the Executive of the South African Reward Association (SARA). He chairs the SARA Employee Benefit Committee and is a Certified Payroll Practitioner and Tax Practitioner. He is the managing partner of Tax Consulting that specialises in employee remuneration, personal tax, employee benefits, employees' tax and more complex areas of taxation.
# coding:utf-8 # http://blog.csdn.net/sdj222555/article/details/7970466 class SimHash: def __init__(self, tokens='', bits=128): self.bits = bits self.hash = self.simhash(tokens) def __str__(self): return str(self.hash) def simhash(self, tokens): v = [0] * self.bits for t in [self.string_hash(x) for x in tokens]: for i in range(self.bits): bitmask = 1 << i if t & bitmask: v[i] += 1 else: v[i] -= 1 fingerprint = 0 for i in range(self.bits): if v[i] >= 0: fingerprint += 1 << i return fingerprint def hamming_distance(self, other): x = (self.hash ^ other.hash) & ((1 << self.bits) - 1) cnt = 0 while x: cnt += 1 x &= x - 1 return cnt def similarity(self, other): a = float(self.hash) b = float(other.hash) if a > b: return b / a else: return a / b def string_hash(self, source): if source == '': return 0 else: x = ord(source[0]) << 7 m = 1000003 mask = 2 ** self.bits - 1 for c in source: x = ((x * m) ^ ord(c)) & mask x ^= len(source) if x == -1: x = -2 return x if __name__ == '__main__': pass
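The __main__ block above is left empty; a quick smoke test along these lines (the token lists are arbitrary examples) shows how the class is meant to be used:

a = SimHash("the quick brown fox jumps over the lazy dog".split())
b = SimHash("the quick brown fox jumped over a lazy dog".split())
print(a)                      # the 128-bit fingerprint as an integer
print(a.hamming_distance(b))  # few differing bits suggests near-duplicate token sets
print(a.similarity(b))        # ratio of the two fingerprint values, in (0, 1]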
Last weekend, precisely on Sunday, May 29, 2016, billionaire oil magnate and Super Big Boy, Prince Dapo Abiodun, celebrated his 56th birthday at an impressive ceremony. Abiodun, the Chairman of Hyden Oil, was a senatorial candidate during the last general election and is one of the most formidable businessmen in Nigeria. He is widely admired and regarded for his humane disposition and his exploits in industry and philanthropy. This duly earned him the national honour of Member of the Order of the Niger (MON) from the Federal Government of Nigeria, and interminable accolades from his peers and even his business rivals in corporate, political and social circles in Nigeria.

The party was held at his Temple Road, Ikoyi, residence. Although it was planned as a low-key celebration, things gathered pace and assumed a greater dimension as the celebrant's home began to fill with almost everyone who mattered in Nigeria's high society. Friends and foes alike (except, of course, Senator Buruji Kashamu) stormed his new mansion that day and had undiluted fun. The celebrant's parents were also in attendance at the birthday party.

Notable among those who graced the party were Ogun State governor, Senator Ibikunle Amosun; Minister of Transport, Rt. Hon. Rotimi Amaechi; Senator Lanre Tejuosho; Senator Gbolahan Dada; Hon. Femi Gbajabiamila; Aliko Dangote; Otunba Gbenga Daniel; Dr. Wale Babalakin; Dr Jide Idris; Doyin Adelabu; Kashim Imam; Niyi Adebayo; Prince Bolu Akin-Olugbade; Otunba Bimbo Ashiru; Willie and Nkiru Anumudu; Otunba Funsho Lawal; Jide and Sola Coker; Timi Alaibe and Sola Adeeyo.

Some other notable guests at the party were Lai Oriowo, Bayo Abdul, Capt. Francis Ogboro, Lanre Ogunlesi, Muyiwa Bakare, Sammie Omai, Tunde Ayeni, Eyimofe and Dorothy Atake, Biola Akinola, Anslem Tanbasi, Lape Adebayo, Yemi Benson, Titi Ogunbanjo, Abi Kuku, Akogun Lanre Alfred, Aare Kayode Alfred, Dayo Adeneye, Shina Peller and Sijibomi Ogundele (Sujimoto). Other dignitaries at the event included prominent power-brokers, oil magnates, bank chiefs, and industry titans with stakes across the country's business sectors.

There is no doubting the fact that Dapo Abiodun's celebrity-studded birthday party was reminiscent of the 40th birthday of Terry 'Versace' Waya, which took place in London in 2001. Waya's birthday drew a heavy presence of some party-loving governors, who subsequently earned the infamous sobriquet, The Owambe Governors.
import numpy import h5py import os import tempfile import cProfile import pstats def h5py_create(filename, datadict, compression): '''Create a new HDF5 file called "filename" and save the values of "datadict" into it using its keys as the dataset names; create an attribute called "compression" holding the value of "compression" parameter.''' f = h5py.File(filename, mode="w") attrvalue = "nothing interesting for now" f.attrs.create("top-level-attribute", attrvalue, dtype="S{x}".format(x=len(attrvalue))) for name,value in datadict.items(): ds = f.create_dataset(name, data=value, compression=compression, chunks=True) ds.attrs.create("compression", str(compression), dtype="S{x}".format(x=len(str(compression)))) return def szip_available(): '''Try to create a dataset using szip: return True if succeeds, False on ValueError (szip not available) and raise on others.''' import tempfile tempf = tempfile.NamedTemporaryFile(dir=".") f = h5py.File(tempf.name,"w") try: f.create_dataset("foo", shape=(10,10), dtype="f8", compression="szip") except ValueError: ret = False else: ret = True finally: f.close() return ret data=numpy.random.random((1000,1000,100)) tempfiles = [tempfile.NamedTemporaryFile(dir=".") for i in [0,1,2,3]] cps = [cProfile.Profile() for i in range(len(tempfiles))] if (szip_available()): comp="szip" else: comp="gzip" runs = [None] + 3*[comp] for i,r in enumerate(runs): if (i==2): data[100:900,100:900,30:70]=0.0 if (i==3): data = numpy.ones((1000,1000,100), dtype=numpy.float64) cps[i].runcall(h5py_create, tempfiles[i].name, {"array_called_data":data}, r) print('''Time spent writing hdf5 data and file sizes: uncompressed random data: {uncompt:g}\t{uncomps} {comp} compressed random data: {compt:g}\t{comps} {comp} compressed semirandom data: {semit:g}\t{semis} {comp} compressed zeros: {zerot:g}\t{zeros}'''.format( uncomps=os.stat(tempfiles[0].name).st_size, comps=os.stat(tempfiles[1].name).st_size, semis=os.stat(tempfiles[2].name).st_size, zeros=os.stat(tempfiles[3].name).st_size, uncompt=pstats.Stats(cps[0]).total_tt, compt=pstats.Stats(cps[1]).total_tt, semit=pstats.Stats(cps[2]).total_tt, zerot=pstats.Stats(cps[3]).total_tt, comp=comp ))
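As a quick sanity check (a sketch, assuming the temporary files are still open and readable on this platform), one of the files can be re-opened to confirm what was written:

with h5py.File(tempfiles[1].name, "r") as f:
    ds = f["array_called_data"]
    print(ds.shape, ds.dtype, ds.compression)  # compression filter actually applied
    print(f.attrs["top-level-attribute"])      # the top-level attribute set in h5py_create
    print(ds.attrs["compression"])             # per-dataset record of the requested filter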
Send sunny flowers from GOGGANS FLORIST to brighten their day and show your love and appreciation. Call today or order this SEASON FOR SUNFLOWERS floral arrangement now from our website. Send Sparkle and Celebrate with Anticipation! Beautiful blue and white flowers from GOGGANS FLORIST symbolize the miracles of the season, so surprise someone today with a bouquet of vibrant blooms. Feel free to call us or order flowers online 24/7. Surprise someone with A Beautiful Sight Arrangement of white flowers. Feel free to call GOGGANS FLORIST or order flowers online 24/7. Rake in an abundance of warm smiles with this breathtaking Canyon Sunset arrangement from GOGGANS FLORIST. Blooming with beautiful fall flowers, it's a lovely way to add a touch of fall festivity. Call or send Fall Flowers online today!